[PATCH 5/6] wined3d: Add a separate function for guessing the card.

Henri Verbeet hverbeet at codeweavers.com
Fri Aug 14 02:23:24 CDT 2009


---
 dlls/wined3d/directx.c |  888 +++++++++++++++++++++++++-----------------------
 1 files changed, 464 insertions(+), 424 deletions(-)
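
A quick sketch for reviewers (illustration only, not part of the patch):
with this change the detection in IWineD3DImpl_FillGLCaps() reduces to
roughly the following, assuming gl_vendor/gl_renderer hold the GL_VENDOR /
GL_RENDERER strings already queried there and vidmem is the existing local:

    /* Guess the vendor first (wined3d_guess_vendor() was added earlier in
     * this series), then the card. The vendor is passed by pointer because
     * the default case falls back to VENDOR_NVIDIA for Mesa/Wine. */
    gl_info->gl_vendor = wined3d_guess_vendor(gl_vendor, gl_renderer);
    gl_info->gl_card = wined3d_guess_card(gl_info, gl_renderer,
            &gl_info->gl_vendor, &vidmem);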

diff --git a/dlls/wined3d/directx.c b/dlls/wined3d/directx.c
index 9e5609d..56c9507 100644
--- a/dlls/wined3d/directx.c
+++ b/dlls/wined3d/directx.c
@@ -1085,6 +1085,469 @@ static GL_Vendors wined3d_guess_vendor(const char *gl_vendor, const char *gl_ren
     return VENDOR_WINE;
 }
 
+static GL_Cards wined3d_guess_card(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
+        GL_Vendors *vendor, unsigned int *vidmem)
+{
+    /* Below is a list of Nvidia and ATI GPUs. Both vendors have dozens of
+     * different GPUs with roughly the same features. In most cases GPUs from
+     * a certain family differ in clock speed, the amount of video memory and
+     * the number of shader pipelines.
+     *
+     * A Direct3D device object contains the PCI id (vendor + device) of the
+     * videocard which is used for rendering. Various applications use this
+     * information to get a rough estimation of the features of the card, and
+     * some might use it to enable 3D effects only on certain types of
+     * videocards. In some cases games might even use it to work around bugs
+     * which happen on certain videocard/driver combinations. The problem is
+     * that OpenGL only exposes a renderer string containing the name of the
+     * videocard, not its PCI id.
+     *
+     * Various games depend on the PCI id, so somehow we need to provide one.
+     * A simple option is to parse the renderer string and translate it to
+     * the right PCI id. This is a lot of work because there are more than
+     * 200 GPUs just for Nvidia. Various cards share the same renderer
+     * string, so the amount of code might be 'small', but there are quite a
+     * number of exceptions which would make this a pain to maintain. Another
+     * way would be to query the PCI id from the operating system (assuming
+     * the queried card is the one used for rendering, which is not always
+     * the case). This would work, but it is not very portable. It would also
+     * not work well in, say, a remote X situation in which the available 3D
+     * features are limited.
+     *
+     * As said, most games only use the PCI id to get an indication of the
+     * capabilities of the card. It doesn't really matter whether the id we
+     * return is exactly right, as long as it belongs to a card with similar
+     * 3D features.
+     *
+     * The code below checks the OpenGL capabilities of a videocard and
+     * matches those to a certain level of Direct3D functionality. Once a
+     * card passes the Direct3D9 check, we know that the card (in case of
+     * Nvidia) is at least a GeforceFX. To give a better estimate we do a
+     * basic check on the renderer string, and if that fails we return a
+     * default card. This is better than maintaining a full card database,
+     * as even without one we can return a card with similar features.
+     * Second, the database can be kept quite small, because once you know
+     * what level of 3D functionality a card has, you know to which GPU
+     * family it must belong. Because of this you only have to check a small
+     * part of the renderer string to distinguish between different models
+     * from that family.
+     *
+     * The code also selects a default amount of video memory which we will
+     * use to estimate the amount of free texture memory. In case of real
+     * D3D the amount of texture memory includes video memory and system
+     * memory (to be specific, AGP memory or, in case of PCIE, TurboCache /
+     * HyperMemory). We don't know how much system memory can be addressed,
+     * but we can make a reasonable estimation of the amount of video memory.
+     * If the value is slightly wrong it doesn't matter: we didn't include
+     * AGP-like memory, which only raises the amount of addressable memory,
+     * and OpenGL isn't that strict about it anyway; it moves data to system
+     * memory behind our backs if really needed. Note that the amount of
+     * video memory can be overridden using a registry setting. */
+
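+    /* For example (illustrative string; the exact GL_RENDERER contents are
+     * driver dependent), an Nvidia renderer string like
+     * "GeForce 9600 GT/PCI/SSE2" passes the D3D9 and NV_vertex_program3
+     * checks and matches the "9600" substring below, so we return
+     * CARD_NVIDIA_GEFORCE_9600GT with a 384MB default. */
+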
+    switch (*vendor)
+    {
+        case VENDOR_NVIDIA:
+            /* The GeforceFX, 6xxx and 7xxx series all support D3D9. The latter two have more
+             * shader capabilities, so we use the shader capabilities to distinguish the FX
+             * series from the 6xxx/7xxx ones.
+             */
+            if (WINE_D3D9_CAPABLE(gl_info) && gl_info->supported[NV_VERTEX_PROGRAM3])
+            {
+                /* Geforce 200 - highend */
+                if (strstr(gl_renderer, "GTX 280")
+                        || strstr(gl_renderer, "GTX 285")
+                        || strstr(gl_renderer, "GTX 295"))
+                {
+                    *vidmem = 1024;
+                    return CARD_NVIDIA_GEFORCE_GTX280;
+                }
+
+                /* Geforce 200 - midend high */
+                if (strstr(gl_renderer, "GTX 275"))
+                {
+                    *vidmem = 896;
+                    return CARD_NVIDIA_GEFORCE_GTX275;
+                }
+
+                /* Geforce 200 - midend */
+                if (strstr(gl_renderer, "GTX 260"))
+                {
+                    *vidmem = 1024;
+                    return CARD_NVIDIA_GEFORCE_GTX260;
+                }
+
+                /* Geforce9 - highend / Geforce 200 - midend (GTS 150/250 are based on the same core) */
+                if (strstr(gl_renderer, "9800")
+                        || strstr(gl_renderer, "GTS 150")
+                        || strstr(gl_renderer, "GTS 250"))
+                {
+                    *vidmem = 512;
+                    return CARD_NVIDIA_GEFORCE_9800GT;
+                }
+
+                /* Geforce9 - midend */
+                if (strstr(gl_renderer, "9600"))
+                {
+                    *vidmem = 384; /* The 9600GSO has 384MB, the 9600GT has 512-1024MB */
+                    return CARD_NVIDIA_GEFORCE_9600GT;
+                }
+
+                /* Geforce9 - midend low / Geforce 200 - low */
+                if (strstr(gl_renderer, "9500")
+                        || strstr(gl_renderer, "GT 120")
+                        || strstr(gl_renderer, "GT 130"))
+                {
+                    *vidmem = 256; /* The 9500GT has 256-1024MB */
+                    return CARD_NVIDIA_GEFORCE_9500GT;
+                }
+
+                /* Geforce9 - lowend */
+                if (strstr(gl_renderer, "9400"))
+                {
+                    *vidmem = 256; /* The 9400GT has 256-1024MB */
+                    return CARD_NVIDIA_GEFORCE_9400GT;
+                }
+
+                /* Geforce9 - lowend low */
+                if (strstr(gl_renderer, "9100")
+                        || strstr(gl_renderer, "9200")
+                        || strstr(gl_renderer, "9300")
+                        || strstr(gl_renderer, "G 100"))
+                {
+                    *vidmem = 256; /* The 9100-9300 cards have 256MB */
+                    return CARD_NVIDIA_GEFORCE_9200;
+                }
+
+                /* Geforce8 - highend */
+                if (strstr(gl_renderer, "8800"))
+                {
+                    *vidmem = 320; /* The 8800GTS uses 320MB, an 8800GTX can have 768MB */
+                    return CARD_NVIDIA_GEFORCE_8800GTS;
+                }
+
+                /* Geforce8 - midend mobile */
+                if (strstr(gl_renderer, "8600 M"))
+                {
+                    *vidmem = 512;
+                    return CARD_NVIDIA_GEFORCE_8600MGT;
+                }
+
+                /* Geforce8 - midend */
+                if (strstr(gl_renderer, "8600")
+                        || strstr(gl_renderer, "8700"))
+                {
+                    *vidmem = 256;
+                    return CARD_NVIDIA_GEFORCE_8600GT;
+                }
+
+                /* Geforce8 - lowend */
+                if (strstr(gl_renderer, "8300")
+                        || strstr(gl_renderer, "8400")
+                        || strstr(gl_renderer, "8500"))
+                {
+                    *vidmem = 128; /* 128-256MB for an 8300, 256-512MB for an 8400 */
+                    return CARD_NVIDIA_GEFORCE_8300GS;
+                }
+
+                /* Geforce7 - highend */
+                if (strstr(gl_renderer, "7800")
+                        || strstr(gl_renderer, "7900")
+                        || strstr(gl_renderer, "7950")
+                        || strstr(gl_renderer, "Quadro FX 4")
+                        || strstr(gl_renderer, "Quadro FX 5"))
+                {
+                    *vidmem = 256; /* A 7800GT uses 256MB while highend 7900 cards can use 512MB */
+                    return CARD_NVIDIA_GEFORCE_7800GT;
+                }
+
+                /* Geforce7 midend */
+                if (strstr(gl_renderer, "7600")
+                        || strstr(gl_renderer, "7700"))
+                {
+                    *vidmem = 256; /* The 7600 uses 256-512MB */
+                    return CARD_NVIDIA_GEFORCE_7600;
+                }
+
+                /* Geforce7 lower medium */
+                if (strstr(gl_renderer, "7400"))
+                {
+                    *vidmem = 256; /* The 7400 uses 256-512MB */
+                    return CARD_NVIDIA_GEFORCE_7400;
+                }
+
+                /* Geforce7 lowend */
+                if (strstr(gl_renderer, "7300"))
+                {
+                    *vidmem = 256; /* Mac Pros with this card have 256 MB */
+                    return CARD_NVIDIA_GEFORCE_7300;
+                }
+
+                /* Geforce6 highend */
+                if (strstr(gl_renderer, "6800"))
+                {
+                    *vidmem = 128; /* The 6800 uses 128-256MB */
+                    return CARD_NVIDIA_GEFORCE_6800;
+                }
+
+                /* Geforce6 - midend */
+                if (strstr(gl_renderer, "6600")
+                        || strstr(gl_renderer, "6610")
+                        || strstr(gl_renderer, "6700"))
+                {
+                    *vidmem = 128; /* A 6600GT has 128-256MB */
+                    return CARD_NVIDIA_GEFORCE_6600GT;
+                }
+
+                /* Geforce6/7 lowend */
+                *vidmem = 64;
+                return CARD_NVIDIA_GEFORCE_6200; /* Geforce 6100/6150/6200/7300/7400/7500 */
+            }
+
+            if (WINE_D3D9_CAPABLE(gl_info))
+            {
+                /* GeforceFX - highend */
+                if (strstr(gl_renderer, "5800")
+                        || strstr(gl_renderer, "5900")
+                        || strstr(gl_renderer, "5950")
+                        || strstr(gl_renderer, "Quadro FX"))
+                {
+                    *vidmem = 256; /* 5800-5900 cards use 256MB */
+                    return CARD_NVIDIA_GEFORCEFX_5800;
+                }
+
+                /* GeforceFX - midend */
+                if (strstr(gl_renderer, "5600")
+                        || strstr(gl_renderer, "5650")
+                        || strstr(gl_renderer, "5700")
+                        || strstr(gl_renderer, "5750"))
+                {
+                    *vidmem = 128; /* A 5600 uses 128-256MB */
+                    return CARD_NVIDIA_GEFORCEFX_5600;
+                }
+
+                /* GeforceFX - lowend */
+                *vidmem = 64; /* Normal FX5200 cards use 64-256MB; laptop (non-standard) can have less */
+                return CARD_NVIDIA_GEFORCEFX_5200; /* GeforceFX 5100/5200/5250/5300/5500 */
+            }
+
+            if (WINE_D3D8_CAPABLE(gl_info))
+            {
+                if (strstr(gl_renderer, "GeForce4 Ti") || strstr(gl_renderer, "Quadro4"))
+                {
+                    *vidmem = 64; /* Geforce4 Ti cards have 64-128MB */
+                    return CARD_NVIDIA_GEFORCE4_TI4200; /* Geforce4 Ti4200/Ti4400/Ti4600/Ti4800, Quadro4 */
+                }
+
+                *vidmem = 64; /* Geforce3 cards have 64-128MB */
+                return CARD_NVIDIA_GEFORCE3; /* Geforce3 standard/Ti200/Ti500, Quadro DCC */
+            }
+
+            if (WINE_D3D7_CAPABLE(gl_info))
+            {
+                if (strstr(gl_renderer, "GeForce4 MX"))
+                {
+                    /* Most Geforce4MX GPUs have at least 64MB of memory, some
+                     * early models had 32MB but most have 64MB or even 128MB. */
+                    *vidmem = 64;
+                    return CARD_NVIDIA_GEFORCE4_MX; /* MX420/MX440/MX460/MX4000 */
+                }
+
+                if (strstr(gl_renderer, "GeForce2 MX") || strstr(gl_renderer, "Quadro2 MXR"))
+                {
+                    *vidmem = 32; /* Geforce2MX GPUs have 32-64MB of video memory */
+                    return CARD_NVIDIA_GEFORCE2_MX; /* Geforce2 standard/MX100/MX200/MX400, Quadro2 MXR */
+                }
+
+                if (strstr(gl_renderer, "GeForce2") || strstr(gl_renderer, "Quadro2"))
+                {
+                    *vidmem = 32; /* Geforce2 GPUs have 32-64MB of video memory */
+                    return CARD_NVIDIA_GEFORCE2; /* Geforce2 GTS/Pro/Ti/Ultra, Quadro2 */
+                }
+
+                /* Most Geforce1 cards have 32MB, there are also some rare 16
+                 * and 64MB (Dell) models. */
+                *vidmem = 32;
+                return CARD_NVIDIA_GEFORCE; /* Geforce 256/DDR, Quadro */
+            }
+
+            if (strstr(gl_renderer, "TNT2"))
+            {
+                *vidmem = 32; /* Most TNT2 boards have 32MB, though there are 16MB boards too */
+                return CARD_NVIDIA_RIVA_TNT2; /* Riva TNT2 standard/M64/Pro/Ultra */
+            }
+
+            *vidmem = 16; /* Most TNT boards have 16MB, some rare models have 8MB */
+            return CARD_NVIDIA_RIVA_TNT; /* Riva TNT, Vanta */
+
+        case VENDOR_ATI:
+            /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
+             *
+             * Beware: renderer strings do not match the exact card model;
+             * e.g. "HD 4800" is returned for multiple cards, even RV790 based ones. */
+            if (WINE_D3D9_CAPABLE(gl_info))
+            {
+                /* Radeon R7xx HD4800 - highend */
+                if (strstr(gl_renderer, "HD 4800")          /* Radeon RV7xx HD48xx generic renderer string */
+                        || strstr(gl_renderer, "HD 4830")   /* Radeon RV770 */
+                        || strstr(gl_renderer, "HD 4850")   /* Radeon RV770 */
+                        || strstr(gl_renderer, "HD 4870")   /* Radeon RV770 */
+                        || strstr(gl_renderer, "HD 4890"))  /* Radeon RV790 */
+                {
+                    *vidmem = 512; /* note: HD4890 cards use 1024MB */
+                    return CARD_ATI_RADEON_HD4800;
+                }
+
+                /* Radeon R740 HD4700 - midend */
+                if (strstr(gl_renderer, "HD 4700")          /* Radeon RV770 */
+                        || strstr(gl_renderer, "HD 4770"))  /* Radeon RV740 */
+                {
+                    *vidmem = 512;
+                    return CARD_ATI_RADEON_HD4700;
+                }
+
+                /* Radeon R730 HD4600 - midend */
+                if (strstr(gl_renderer, "HD 4600")          /* Radeon RV730 */
+                        || strstr(gl_renderer, "HD 4650")   /* Radeon RV730 */
+                        || strstr(gl_renderer, "HD 4670"))  /* Radeon RV730 */
+                {
+                    *vidmem = 512;
+                    return CARD_ATI_RADEON_HD4600;
+                }
+
+                /* Radeon R710 HD4500/HD4350 - lowend */
+                if (strstr(gl_renderer, "HD 4350")          /* Radeon RV710 */
+                        || strstr(gl_renderer, "HD 4550"))  /* Radeon RV710 */
+                {
+                    *vidmem = 256;
+                    return CARD_ATI_RADEON_HD4350;
+                }
+
+                /* Radeon R6xx HD2900/HD3800 - highend */
+                if (strstr(gl_renderer, "HD 2900")
+                        || strstr(gl_renderer, "HD 3870")
+                        || strstr(gl_renderer, "HD 3850"))
+                {
+                    *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
+                    return CARD_ATI_RADEON_HD2900;
+                }
+
+                /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
+                if (strstr(gl_renderer, "HD 2600")
+                        || strstr(gl_renderer, "HD 3830")
+                        || strstr(gl_renderer, "HD 3690")
+                        || strstr(gl_renderer, "HD 3650"))
+                {
+                    *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
+                    return CARD_ATI_RADEON_HD2600;
+                }
+
+                /* Radeon R6xx HD2300/HD2400/HD3400 - lowend */
+                if (strstr(gl_renderer, "HD 2300")
+                        || strstr(gl_renderer, "HD 2400")
+                        || strstr(gl_renderer, "HD 3470")
+                        || strstr(gl_renderer, "HD 3450")
+                        || strstr(gl_renderer, "HD 3430")
+                        || strstr(gl_renderer, "HD 3400"))
+                {
+                    *vidmem = 128; /* HD2300 uses at least 128MB, HD2400 uses 256MB */
+                    return CARD_ATI_RADEON_HD2300;
+                }
+
+                /* Radeon R6xx/R7xx integrated */
+                if (strstr(gl_renderer, "HD 3100")
+                        || strstr(gl_renderer, "HD 3200")
+                        || strstr(gl_renderer, "HD 3300"))
+                {
+                    *vidmem = 128; /* 128MB */
+                    return CARD_ATI_RADEON_HD3200;
+                }
+
+                /* Radeon R5xx */
+                if (strstr(gl_renderer, "X1600")
+                        || strstr(gl_renderer, "X1650")
+                        || strstr(gl_renderer, "X1800")
+                        || strstr(gl_renderer, "X1900")
+                        || strstr(gl_renderer, "X1950"))
+                {
+                    *vidmem = 128; /* X1600 uses 128-256MB, >=X1800 uses 256MB */
+                    return CARD_ATI_RADEON_X1600;
+                }
+
+                /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300 (lowend R5xx) */
+                if (strstr(gl_renderer, "X700")
+                        || strstr(gl_renderer, "X800")
+                        || strstr(gl_renderer, "X850")
+                        || strstr(gl_renderer, "X1300")
+                        || strstr(gl_renderer, "X1400")
+                        || strstr(gl_renderer, "X1450")
+                        || strstr(gl_renderer, "X1550"))
+                {
+                    *vidmem = 128; /* x700/x8*0 use 128-256MB, >=x1300 128-512MB */
+                    return CARD_ATI_RADEON_X700;
+                }
+
+                /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
+                if (strstr(gl_renderer, "Radeon Xpress"))
+                {
+                    *vidmem = 64; /* Shared RAM, BIOS configurable, 64-256MB */
+                    return CARD_ATI_RADEON_XPRESS_200M;
+                }
+
+                /* Radeon R3xx */
+                *vidmem = 64; /* Radeon 9500 uses 64MB, higher models use up to 256MB */
+                return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
+            }
+
+            if (WINE_D3D8_CAPABLE(gl_info))
+            {
+                *vidmem = 64; /* 8500/9000 cards mostly use 64MB, though there are 32MB and 128MB models */
+                return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
+            }
+
+            if (WINE_D3D7_CAPABLE(gl_info))
+            {
+                *vidmem = 32; /* There are models with up to 64MB */
+                return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
+            }
+
+            *vidmem = 16; /* There are 16-32MB models */
+            return CARD_ATI_RAGE_128PRO;
+
+        case VENDOR_INTEL:
+            if (strstr(gl_renderer, "X3100"))
+            {
+                /* MacOS calls the card GMA X3100; Google findings also suggest the name GM965 */
+                *vidmem = 128;
+                return CARD_INTEL_X3100;
+            }
+
+            if (strstr(gl_renderer, "GMA 950") || strstr(gl_renderer, "945GM"))
+            {
+                /* MacOS calls the card GMA 950, but everywhere else the PCI ID is named 945GM */
+                *vidmem = 64;
+                return CARD_INTEL_I945GM;
+            }
+
+            if (strstr(gl_renderer, "915GM")) return CARD_INTEL_I915GM;
+            if (strstr(gl_renderer, "915G")) return CARD_INTEL_I915G;
+            if (strstr(gl_renderer, "865G")) return CARD_INTEL_I865G;
+            if (strstr(gl_renderer, "855G")) return CARD_INTEL_I855G;
+            if (strstr(gl_renderer, "830G")) return CARD_INTEL_I830G;
+            return CARD_INTEL_I915G;
+
+        case VENDOR_MESA:
+        case VENDOR_WINE:
+        default:
+            /* Default to generic Nvidia hardware based on the supported OpenGL extensions. We
+             * pick Nvidia because the hardware and drivers they make are of good quality, which
+             * makes them a good generic choice. */
+            *vendor = VENDOR_NVIDIA;
+            if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCEFX_5600;
+            if (WINE_D3D8_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE3;
+            if (WINE_D3D7_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE;
+            if (WINE_D3D6_CAPABLE(gl_info)) return CARD_NVIDIA_RIVA_TNT;
+            return CARD_NVIDIA_RIVA_128;
+    }
+}
+
 /* Context activation is done by the caller. */
 static BOOL IWineD3DImpl_FillGLCaps(struct wined3d_gl_info *gl_info)
 {
@@ -1501,430 +1964,7 @@ static BOOL IWineD3DImpl_FillGLCaps(struct wined3d_gl_info *gl_info)
         gl_info->max_buffers = 1;
     }
 
-    /* Below is a list of Nvidia and ATI GPUs. Both vendors have dozens of different GPUs with roughly the same
-     * features. In most cases GPUs from a certain family differ in clockspeeds, the amount of video memory and
-     * in case of the latest videocards in the number of pixel/vertex pipelines.
-     *
-     * A Direct3D device object contains the PCI id (vendor + device) of the videocard which is used for
-     * rendering. Various games use this information to get a rough estimation of the features of the card
-     * and some might use it for enabling 3d effects only on certain types of videocards. In some cases
-     * games might even use it to work around bugs which happen on certain videocards/driver combinations.
-     * The problem is that OpenGL only exposes a rendering string containing the name of the videocard and
-     * not the PCI id.
-     *
-     * Various games depend on the PCI id, so somehow we need to provide one. A simple option is to parse
-     * the renderer string and translate this to the right PCI id. This is a lot of work because there are more
-     * than 200 GPUs just for Nvidia. Various cards share the same renderer string, so the amount of code might
-     * be 'small' but there are quite a number of exceptions which would make this a pain to maintain.
-     * Another way would be to query the PCI id from the operating system (assuming this is the videocard which
-     * is used for rendering which is not always the case). This would work but it is not very portable. Second
-     * it would not work well in, let's say, a remote X situation in which the amount of 3d features which can be used
-     * is limited.
-     *
-     * As said most games only use the PCI id to get an indication of the capabilities of the card.
-     * It doesn't really matter if the given id is the correct one if we return the id of a card with
-     * similar 3d features.
-     *
-     * The code below checks the OpenGL capabilities of a videocard and matches that to a certain level of
-     * Direct3D functionality. Once a card passes the Direct3D9 check, we know that the card (in case of Nvidia)
-     * is at least a GeforceFX. To give a better estimate we do a basic check on the renderer string but if that
-     * won't pass we return a default card. This way is better than maintaining a full card database as even
-     * without a full database we can return a card with similar features. Second the size of the database
-     * can be made quite small because when you know what type of 3d functionality a card has, you know to which
-     * GPU family the GPU must belong. Because of this you only have to check a small part of the renderer string
-     * to distinguishes between different models from that family.
-     *
-     * The code also selects a default amount of video memory which we will use for an estimation of the amount
-     * of free texture memory. In case of real D3D the amount of texture memory includes video memory and system
-     * memory (to be specific AGP memory or in case of PCIE TurboCache/HyperMemory). We don't know how much
-     * system memory can be addressed by the system but we can make a reasonable estimation about the amount of
-     * video memory. If the value is slightly wrong it doesn't matter as we didn't include AGP-like memory which
-     * makes the amount of addressable memory higher and second OpenGL isn't that critical it moves to system
-     * memory behind our backs if really needed.
-     * Note that the amount of video memory can be overruled using a registry setting.
-     */
-    switch (gl_info->gl_vendor) {
-        case VENDOR_NVIDIA:
-            /* Both the GeforceFX, 6xxx and 7xxx series support D3D9. The last two types have more
-             * shader capabilities, so we use the shader capabilities to distinguish between FX and 6xxx/7xxx.
-             */
-            if (WINE_D3D9_CAPABLE(gl_info) && gl_info->supported[NV_VERTEX_PROGRAM3])
-            {
-                /* Geforce 200 - highend */
-                if (strstr(gl_renderer, "GTX 280")
-                        || strstr(gl_renderer, "GTX 285")
-                        || strstr(gl_renderer, "GTX 295"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_GTX280;
-                    vidmem = 1024;
-                }
-                /* Geforce 200 - midend high */
-                else if (strstr(gl_renderer, "GTX 275"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_GTX275;
-                    vidmem = 896;
-                }
-                /* Geforce 200 - midend */
-                else if (strstr(gl_renderer, "GTX 260"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_GTX260;
-                    vidmem = 1024;
-                }
-                /* Geforce9 - highend / Geforce 200 - midend (GTS 150/250 are based on the same core) */
-                else if (strstr(gl_renderer, "9800")
-                        || strstr(gl_renderer, "GTS 150")
-                        || strstr(gl_renderer, "GTS 250"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_9800GT;
-                    vidmem = 512;
-                }
-                /* Geforce9 - midend */
-                else if (strstr(gl_renderer, "9600"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_9600GT;
-                    vidmem = 384; /* The 9600GSO has 384MB, the 9600GT has 512-1024MB */
-                }
-                /* Geforce9 - midend low / Geforce 200 - low*/
-                else if (strstr(gl_renderer, "9500")
-                        || strstr(gl_renderer, "GT 120")
-                        || strstr(gl_renderer, "GT 130"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_9500GT;
-                    vidmem = 256; /* The 9500GT has 256-1024MB */
-                }
-                /* Geforce9 - lowend */
-                else if (strstr(gl_renderer, "9400"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_9400GT;
-                    vidmem = 256; /* The 9400GT has 256-1024MB */
-                }
-                /* Geforce9 - lowend low */
-                else if (strstr(gl_renderer, "9100")
-                        || strstr(gl_renderer, "9200")
-                        || strstr(gl_renderer, "9300")
-                        || strstr(gl_renderer, "G 100"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_9200;
-                    vidmem = 256; /* The 9100-9300 cards have 256MB */
-                }
-                /* Geforce8 - highend */
-                else if (strstr(gl_renderer, "8800"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_8800GTS;
-                    vidmem = 320; /* The 8800GTS uses 320MB, a 8800GTX can have 768MB */
-                }
-                /* Geforce8 - midend mobile */
-                else if (strstr(gl_renderer, "8600 M"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_8600MGT;
-                    vidmem = 512;
-                }
-                /* Geforce8 - midend */
-                else if (strstr(gl_renderer, "8600")
-                        || strstr(gl_renderer, "8700"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_8600GT;
-                    vidmem = 256;
-                }
-                /* Geforce8 - lowend */
-                else if (strstr(gl_renderer, "8300")
-                        || strstr(gl_renderer, "8400")
-                        || strstr(gl_renderer, "8500"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_8300GS;
-                    vidmem = 128; /* 128-256MB for a 8300, 256-512MB for a 8400 */
-                }
-                /* Geforce7 - highend */
-                else if (strstr(gl_renderer, "7800")
-                        || strstr(gl_renderer, "7900")
-                        || strstr(gl_renderer, "7950")
-                        || strstr(gl_renderer, "Quadro FX 4")
-                        || strstr(gl_renderer, "Quadro FX 5"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_7800GT;
-                    vidmem = 256; /* A 7800GT uses 256MB while highend 7900 cards can use 512MB */
-                }
-                /* Geforce7 midend */
-                else if (strstr(gl_renderer, "7600")
-                        || strstr(gl_renderer, "7700"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_7600;
-                    vidmem = 256; /* The 7600 uses 256-512MB */
-                /* Geforce7 lower medium */
-                }
-                else if (strstr(gl_renderer, "7400"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_7400;
-                    vidmem = 256; /* The 7400 uses 256-512MB */
-                }
-                /* Geforce7 lowend */
-                else if (strstr(gl_renderer, "7300"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_7300;
-                    vidmem = 256; /* Mac Pros with this card have 256 MB */
-                }
-                /* Geforce6 highend */
-                else if (strstr(gl_renderer, "6800"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_6800;
-                    vidmem = 128; /* The 6800 uses 128-256MB, the 7600 uses 256-512MB */
-                }
-                /* Geforce6 - midend */
-                else if (strstr(gl_renderer, "6600")
-                        || strstr(gl_renderer, "6610")
-                        || strstr(gl_renderer, "6700"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_6600GT;
-                    vidmem = 128; /* A 6600GT has 128-256MB */
-                }
-                /* Geforce6/7 lowend */
-                else {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE_6200; /* Geforce 6100/6150/6200/7300/7400/7500 */
-                    vidmem = 64; /* */
-                }
-            } else if(WINE_D3D9_CAPABLE(gl_info)) {
-                /* GeforceFX - highend */
-                if (strstr(gl_renderer, "5800")
-                        || strstr(gl_renderer, "5900")
-                        || strstr(gl_renderer, "5950")
-                        || strstr(gl_renderer, "Quadro FX"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCEFX_5800;
-                    vidmem = 256; /* 5800-5900 cards use 256MB */
-                }
-                /* GeforceFX - midend */
-                else if (strstr(gl_renderer, "5600")
-                        || strstr(gl_renderer, "5650")
-                        || strstr(gl_renderer, "5700")
-                        || strstr(gl_renderer, "5750"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCEFX_5600;
-                    vidmem = 128; /* A 5600 uses 128-256MB */
-                }
-                /* GeforceFX - lowend */
-                else {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCEFX_5200; /* GeforceFX 5100/5200/5250/5300/5500 */
-                    vidmem = 64; /* Normal FX5200 cards use 64-256MB; laptop (non-standard) can have less */
-                }
-            } else if(WINE_D3D8_CAPABLE(gl_info)) {
-                if (strstr(gl_renderer, "GeForce4 Ti") || strstr(gl_renderer, "Quadro4"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE4_TI4200; /* Geforce4 Ti4200/Ti4400/Ti4600/Ti4800, Quadro4 */
-                    vidmem = 64; /* Geforce4 Ti cards have 64-128MB */
-                }
-                else
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE3; /* Geforce3 standard/Ti200/Ti500, Quadro DCC */
-                    vidmem = 64; /* Geforce3 cards have 64-128MB */
-                }
-            } else if(WINE_D3D7_CAPABLE(gl_info)) {
-                if (strstr(gl_renderer, "GeForce4 MX"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE4_MX; /* MX420/MX440/MX460/MX4000 */
-                    vidmem = 64; /* Most Geforce4MX GPUs have at least 64MB of memory, some early models had 32MB but most have 64MB or even 128MB */
-                }
-                else if(strstr(gl_renderer, "GeForce2 MX") || strstr(gl_renderer, "Quadro2 MXR"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE2_MX; /* Geforce2 standard/MX100/MX200/MX400, Quadro2 MXR */
-                    vidmem = 32; /* Geforce2MX GPUs have 32-64MB of video memory */
-                }
-                else if(strstr(gl_renderer, "GeForce2") || strstr(gl_renderer, "Quadro2"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE2; /* Geforce2 GTS/Pro/Ti/Ultra, Quadro2 */
-                    vidmem = 32; /* Geforce2 GPUs have 32-64MB of video memory */
-                }
-                else
-                {
-                    gl_info->gl_card = CARD_NVIDIA_GEFORCE; /* Geforce 256/DDR, Quadro */
-                    vidmem = 32; /* Most Geforce1 cards have 32MB, there are also some rare 16 and 64MB (Dell) models */
-                }
-            } else {
-                if (strstr(gl_renderer, "TNT2"))
-                {
-                    gl_info->gl_card = CARD_NVIDIA_RIVA_TNT2; /* Riva TNT2 standard/M64/Pro/Ultra */
-                    vidmem = 32; /* Most TNT2 boards have 32MB, though there are 16MB boards too */
-                }
-                else
-                {
-                    gl_info->gl_card = CARD_NVIDIA_RIVA_TNT; /* Riva TNT, Vanta */
-                    vidmem = 16; /* Most TNT boards have 16MB, some rare models have 8MB */
-                }
-            }
-            break;
-        case VENDOR_ATI:
-            /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
-             *
-             * beware: renderer string do not match exact card model,
-             * eg HD 4800 is returned for multiple card, even for RV790 based one
-             */
-            if(WINE_D3D9_CAPABLE(gl_info)) {
-                /* Radeon R7xx HD4800 - highend */
-                if (strstr(gl_renderer, "HD 4800")          /* Radeon RV7xx HD48xx generic renderer string */
-                        || strstr(gl_renderer, "HD 4830")   /* Radeon RV770 */
-                        || strstr(gl_renderer, "HD 4850")   /* Radeon RV770 */
-                        || strstr(gl_renderer, "HD 4870")   /* Radeon RV770 */
-                        || strstr(gl_renderer, "HD 4890"))  /* Radeon RV790 */
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD4800;
-                    vidmem = 512; /* note: HD4890 cards use 1024MB */
-                }
-                /* Radeon R740 HD4700 - midend */
-                else if (strstr(gl_renderer, "HD 4700")     /* Radeon RV770 */
-                        || strstr(gl_renderer, "HD 4770"))  /* Radeon RV740 */
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD4700;
-                    vidmem = 512;
-                }
-                /* Radeon R730 HD4600 - midend */
-                else if (strstr(gl_renderer, "HD 4600")     /* Radeon RV730 */
-                        || strstr(gl_renderer, "HD 4650")   /* Radeon RV730 */
-                        || strstr(gl_renderer, "HD 4670"))  /* Radeon RV730 */
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD4600;
-                    vidmem = 512;
-                }
-                /* Radeon R710 HD4500/HD4350 - lowend */
-                else if (strstr(gl_renderer, "HD 4350")     /* Radeon RV710 */
-                        || strstr(gl_renderer, "HD 4550"))  /* Radeon RV710 */
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD4350;
-                    vidmem = 256;
-                }
-                /* Radeon R6xx HD2900/HD3800 - highend */
-                else if (strstr(gl_renderer, "HD 2900")
-                        || strstr(gl_renderer, "HD 3870")
-                        || strstr(gl_renderer, "HD 3850"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD2900;
-                    vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
-                }
-                /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
-                else if (strstr(gl_renderer, "HD 2600")
-                        || strstr(gl_renderer, "HD 3830")
-                        || strstr(gl_renderer, "HD 3690")
-                        || strstr(gl_renderer, "HD 3650"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD2600;
-                    vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
-                }
-                /* Radeon R6xx HD2300/HD2400/HD3400 - lowend */
-                else if (strstr(gl_renderer, "HD 2300")
-                        || strstr(gl_renderer, "HD 2400")
-                        || strstr(gl_renderer, "HD 3470")
-                        || strstr(gl_renderer, "HD 3450")
-                        || strstr(gl_renderer, "HD 3430")
-                        || strstr(gl_renderer, "HD 3400"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD2300;
-                    vidmem = 128; /* HD2300 uses at least 128MB, HD2400 uses 256MB */
-                }
-                /* Radeon R6xx/R7xx integrated */
-                else if (strstr(gl_renderer, "HD 3100")
-                        || strstr(gl_renderer, "HD 3200")
-                        || strstr(gl_renderer, "HD 3300"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_HD3200;
-                    vidmem = 128; /* 128MB */
-                }
-                /* Radeon R5xx */
-                else if (strstr(gl_renderer, "X1600")
-                        || strstr(gl_renderer, "X1650")
-                        || strstr(gl_renderer, "X1800")
-                        || strstr(gl_renderer, "X1900")
-                        || strstr(gl_renderer, "X1950"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_X1600;
-                    vidmem = 128; /* X1600 uses 128-256MB, >=X1800 uses 256MB */
-                }
-                /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300 (lowend R5xx) */
-                else if(strstr(gl_renderer, "X700")
-                        || strstr(gl_renderer, "X800")
-                        || strstr(gl_renderer, "X850")
-                        || strstr(gl_renderer, "X1300")
-                        || strstr(gl_renderer, "X1400")
-                        || strstr(gl_renderer, "X1450")
-                        || strstr(gl_renderer, "X1550"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_X700;
-                    vidmem = 128; /* x700/x8*0 use 128-256MB, >=x1300 128-512MB */
-                }
-                /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
-                else if(strstr(gl_renderer, "Radeon Xpress"))
-                {
-                    gl_info->gl_card = CARD_ATI_RADEON_XPRESS_200M;
-                    vidmem = 64; /* Shared RAM, BIOS configurable, 64-256M */
-                }
-                /* Radeon R3xx */ 
-                else {
-                    gl_info->gl_card = CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
-                    vidmem = 64; /* Radeon 9500 uses 64MB, higher models use up to 256MB */
-                }
-            } else if(WINE_D3D8_CAPABLE(gl_info)) {
-                gl_info->gl_card = CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
-                vidmem = 64; /* 8500/9000 cards use mostly 64MB, though there are 32MB and 128MB models */
-            } else if(WINE_D3D7_CAPABLE(gl_info)) {
-                gl_info->gl_card = CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
-                vidmem = 32; /* There are models with up to 64MB */
-            } else {
-                gl_info->gl_card = CARD_ATI_RAGE_128PRO;
-                vidmem = 16; /* There are 16-32MB models */
-            }
-            break;
-        case VENDOR_INTEL:
-            if(strstr(gl_renderer, "X3100"))
-            {
-                /* MacOS calls the card GMA X3100, Google findings also suggest the name GM965 */
-                gl_info->gl_card = CARD_INTEL_X3100;
-                vidmem = 128;
-            }
-            else if (strstr(gl_renderer, "GMA 950") || strstr(gl_renderer, "945GM"))
-            {
-                /* MacOS calls the card GMA 950, but everywhere else the PCI ID is named 945GM */
-                gl_info->gl_card = CARD_INTEL_I945GM;
-                vidmem = 64;
-            }
-            else if (strstr(gl_renderer, "915GM"))
-            {
-                gl_info->gl_card = CARD_INTEL_I915GM;
-            }
-            else if (strstr(gl_renderer, "915G"))
-            {
-                gl_info->gl_card = CARD_INTEL_I915G;
-            }
-            else if (strstr(gl_renderer, "865G"))
-            {
-                gl_info->gl_card = CARD_INTEL_I865G;
-            }
-            else if (strstr(gl_renderer, "855G"))
-            {
-                gl_info->gl_card = CARD_INTEL_I855G;
-            }
-            else if (strstr(gl_renderer, "830G"))
-            {
-                gl_info->gl_card = CARD_INTEL_I830G;
-            } else {
-                gl_info->gl_card = CARD_INTEL_I915G;
-            }
-            break;
-        case VENDOR_MESA:
-        case VENDOR_WINE:
-        default:
-            /* Default to generic Nvidia hardware based on the supported OpenGL extensions. The choice 
-             * for Nvidia was because the hardware and drivers they make are of good quality. This makes
-             * them a good generic choice.
-             */
-            gl_info->gl_vendor = VENDOR_NVIDIA;
-            if(WINE_D3D9_CAPABLE(gl_info))
-                gl_info->gl_card = CARD_NVIDIA_GEFORCEFX_5600;
-            else if(WINE_D3D8_CAPABLE(gl_info))
-                gl_info->gl_card = CARD_NVIDIA_GEFORCE3;
-            else if(WINE_D3D7_CAPABLE(gl_info))
-                gl_info->gl_card = CARD_NVIDIA_GEFORCE;
-            else if(WINE_D3D6_CAPABLE(gl_info))
-                gl_info->gl_card = CARD_NVIDIA_RIVA_TNT;
-            else
-                gl_info->gl_card = CARD_NVIDIA_RIVA_128;
-    }
+    gl_info->gl_card = wined3d_guess_card(gl_info, gl_renderer, &gl_info->gl_vendor, &vidmem);
     TRACE_(d3d_caps)("FOUND (fake) card: 0x%x (vendor id), 0x%x (device id)\n", gl_info->gl_vendor, gl_info->gl_card);
 
     /* If we have an estimate use it, else default to 64MB;  */
-- 
1.6.0.6



