/*
 * IWineD3D implementation
 *
 * Copyright 2002-2004 Jason Edmeades
 * Copyright 2003-2004 Raphael Junqueira
 * Copyright 2004 Christian Costa
 * Copyright 2005 Oliver Stieber
 * Copyright 2007-2008 Stefan Dösinger for CodeWeavers
 * Copyright 2009 Henri Verbeet for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include <stdio.h>
#include "wined3d_private.h"

#ifdef VBOX_WITH_WDDM
# include <VBoxCrHgsmi.h>
#endif

#ifdef VBOX_WITH_VMSVGA
# ifdef RT_OS_WINDOWS
DECLIMPORT(void) APIENTRY glFinish(void);
# else
void glFinish(void);
# endif
#endif

WINE_DEFAULT_DEBUG_CHANNEL(d3d);
WINE_DECLARE_DEBUG_CHANNEL(d3d_caps);

#define GLINFO_LOCATION (*gl_info)
#define WINE_DEFAULT_VIDMEM (64 * 1024 * 1024)

/* The d3d device ID */
#if 0 /* VBox: unused */
static const GUID IID_D3DDEVICE_D3DUID = { 0xaeb2cdd4, 0x6e41, 0x43ea, { 0x94,0x1c,0x83,0x61,0xcc,0x76,0x07,0x81 } };
#endif


/* Extension detection */
static const struct {
    const char *extension_string;
    GL_SupportedExt extension;
    DWORD version;
} EXTENSION_MAP[] = {
    /* APPLE */
    {"GL_APPLE_client_storage", APPLE_CLIENT_STORAGE, 0 },
    {"GL_APPLE_fence", APPLE_FENCE, 0 },
    {"GL_APPLE_float_pixels", APPLE_FLOAT_PIXELS, 0 },
    {"GL_APPLE_flush_buffer_range", APPLE_FLUSH_BUFFER_RANGE, 0 },
    {"GL_APPLE_flush_render", APPLE_FLUSH_RENDER, 0 },
    {"GL_APPLE_ycbcr_422", APPLE_YCBCR_422, 0 },

    /* ARB */
    {"GL_ARB_color_buffer_float", ARB_COLOR_BUFFER_FLOAT, 0 },
    {"GL_ARB_depth_buffer_float", ARB_DEPTH_BUFFER_FLOAT, 0 },
    {"GL_ARB_depth_clamp", ARB_DEPTH_CLAMP, 0 },
    {"GL_ARB_depth_texture", ARB_DEPTH_TEXTURE, 0 },
    {"GL_ARB_draw_buffers", ARB_DRAW_BUFFERS, 0 },
    {"GL_ARB_fragment_program", ARB_FRAGMENT_PROGRAM, 0 },
    {"GL_ARB_fragment_shader", ARB_FRAGMENT_SHADER, 0 },
    {"GL_ARB_framebuffer_object", ARB_FRAMEBUFFER_OBJECT, 0 },
    {"GL_ARB_geometry_shader4", ARB_GEOMETRY_SHADER4, 0 },
    {"GL_ARB_half_float_pixel", ARB_HALF_FLOAT_PIXEL, 0 },
    {"GL_ARB_half_float_vertex", ARB_HALF_FLOAT_VERTEX, 0 },
    {"GL_ARB_imaging", ARB_IMAGING, 0 },
    {"GL_ARB_map_buffer_range", ARB_MAP_BUFFER_RANGE, 0 },
    {"GL_ARB_multisample", ARB_MULTISAMPLE, 0 }, /* needs GLX_ARB_MULTISAMPLE as well */
    {"GL_ARB_multitexture", ARB_MULTITEXTURE, 0 },
    {"GL_ARB_occlusion_query", ARB_OCCLUSION_QUERY, 0 },
    {"GL_ARB_pixel_buffer_object", ARB_PIXEL_BUFFER_OBJECT, 0 },
    {"GL_ARB_point_parameters", ARB_POINT_PARAMETERS, 0 },
    {"GL_ARB_point_sprite", ARB_POINT_SPRITE, 0 },
    {"GL_ARB_provoking_vertex", ARB_PROVOKING_VERTEX, 0 },
    {"GL_ARB_shader_objects", ARB_SHADER_OBJECTS, 0 },
    {"GL_ARB_shader_texture_lod", ARB_SHADER_TEXTURE_LOD, 0 },
    {"GL_ARB_shading_language_100", ARB_SHADING_LANGUAGE_100, 0 },
    {"GL_ARB_sync", ARB_SYNC, 0 },
    {"GL_ARB_texture_border_clamp", ARB_TEXTURE_BORDER_CLAMP, 0 },
    {"GL_ARB_texture_compression", ARB_TEXTURE_COMPRESSION, 0 },
    {"GL_ARB_texture_cube_map", ARB_TEXTURE_CUBE_MAP, 0 },
    {"GL_ARB_texture_env_add", ARB_TEXTURE_ENV_ADD, 0 },
    {"GL_ARB_texture_env_combine", ARB_TEXTURE_ENV_COMBINE, 0 },
    {"GL_ARB_texture_env_dot3", ARB_TEXTURE_ENV_DOT3, 0 },
    {"GL_ARB_texture_float", ARB_TEXTURE_FLOAT, 0 },
    {"GL_ARB_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT, 0 },
    {"GL_IBM_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT, 0 },
    {"GL_ARB_texture_non_power_of_two", ARB_TEXTURE_NON_POWER_OF_TWO, MAKEDWORD_VERSION(2, 0) },
    {"GL_ARB_texture_rectangle", ARB_TEXTURE_RECTANGLE, 0 },
    {"GL_ARB_texture_rg", ARB_TEXTURE_RG, 0 },
    {"GL_ARB_vertex_array_bgra", ARB_VERTEX_ARRAY_BGRA, 0 },
    {"GL_ARB_vertex_blend", ARB_VERTEX_BLEND, 0 },
    {"GL_ARB_vertex_buffer_object", ARB_VERTEX_BUFFER_OBJECT, 0 },
    {"GL_ARB_vertex_program", ARB_VERTEX_PROGRAM, 0 },
    {"GL_ARB_vertex_shader", ARB_VERTEX_SHADER, 0 },

    /* ATI */
    {"GL_ATI_fragment_shader", ATI_FRAGMENT_SHADER, 0 },
    {"GL_ATI_separate_stencil", ATI_SEPARATE_STENCIL, 0 },
    {"GL_ATI_texture_compression_3dc", ATI_TEXTURE_COMPRESSION_3DC, 0 },
    {"GL_ATI_texture_env_combine3", ATI_TEXTURE_ENV_COMBINE3, 0 },
    {"GL_ATI_texture_mirror_once", ATI_TEXTURE_MIRROR_ONCE, 0 },

    /* EXT */
    {"GL_EXT_blend_color", EXT_BLEND_COLOR, 0 },
    {"GL_EXT_blend_equation_separate", EXT_BLEND_EQUATION_SEPARATE, 0 },
    {"GL_EXT_blend_func_separate", EXT_BLEND_FUNC_SEPARATE, 0 },
    {"GL_EXT_blend_minmax", EXT_BLEND_MINMAX, 0 },
    {"GL_EXT_draw_buffers2", EXT_DRAW_BUFFERS2, 0 },
    {"GL_EXT_fog_coord", EXT_FOG_COORD, 0 },
    {"GL_EXT_framebuffer_blit", EXT_FRAMEBUFFER_BLIT, 0 },
    {"GL_EXT_framebuffer_multisample", EXT_FRAMEBUFFER_MULTISAMPLE, 0 },
    {"GL_EXT_framebuffer_object", EXT_FRAMEBUFFER_OBJECT, 0 },
    {"GL_EXT_gpu_program_parameters", EXT_GPU_PROGRAM_PARAMETERS, 0 },
    {"GL_EXT_gpu_shader4", EXT_GPU_SHADER4, 0 },
    {"GL_EXT_packed_depth_stencil", EXT_PACKED_DEPTH_STENCIL, 0 },
    {"GL_EXT_paletted_texture", EXT_PALETTED_TEXTURE, 0 },
    {"GL_EXT_point_parameters", EXT_POINT_PARAMETERS, 0 },
    {"GL_EXT_provoking_vertex", EXT_PROVOKING_VERTEX, 0 },
    {"GL_EXT_secondary_color", EXT_SECONDARY_COLOR, 0 },
    {"GL_EXT_stencil_two_side", EXT_STENCIL_TWO_SIDE, 0 },
    {"GL_EXT_stencil_wrap", EXT_STENCIL_WRAP, 0 },
    {"GL_EXT_texture3D", EXT_TEXTURE3D, MAKEDWORD_VERSION(1, 2) },
    {"GL_EXT_texture_compression_rgtc", EXT_TEXTURE_COMPRESSION_RGTC, 0 },
    {"GL_EXT_texture_compression_s3tc", EXT_TEXTURE_COMPRESSION_S3TC, 0 },
    {"GL_EXT_texture_env_add", EXT_TEXTURE_ENV_ADD, 0 },
    {"GL_EXT_texture_env_combine", EXT_TEXTURE_ENV_COMBINE, 0 },
    {"GL_EXT_texture_env_dot3", EXT_TEXTURE_ENV_DOT3, 0 },
    {"GL_EXT_texture_filter_anisotropic", EXT_TEXTURE_FILTER_ANISOTROPIC, 0 },
    {"GL_EXT_texture_lod_bias", EXT_TEXTURE_LOD_BIAS, 0 },
    {"GL_EXT_texture_sRGB", EXT_TEXTURE_SRGB, 0 },
    {"GL_EXT_vertex_array_bgra", EXT_VERTEX_ARRAY_BGRA, 0 },

    /* NV */
    {"GL_NV_depth_clamp", NV_DEPTH_CLAMP, 0 },
    {"GL_NV_fence", NV_FENCE, 0 },
    {"GL_NV_fog_distance", NV_FOG_DISTANCE, 0 },
    {"GL_NV_fragment_program", NV_FRAGMENT_PROGRAM, 0 },
    {"GL_NV_fragment_program2", NV_FRAGMENT_PROGRAM2, 0 },
    {"GL_NV_fragment_program_option", NV_FRAGMENT_PROGRAM_OPTION, 0 },
    {"GL_NV_half_float", NV_HALF_FLOAT, 0 },
    {"GL_NV_light_max_exponent", NV_LIGHT_MAX_EXPONENT, 0 },
    {"GL_NV_register_combiners", NV_REGISTER_COMBINERS, 0 },
    {"GL_NV_register_combiners2", NV_REGISTER_COMBINERS2, 0 },
    {"GL_NV_texgen_reflection", NV_TEXGEN_REFLECTION, 0 },
    {"GL_NV_texture_env_combine4", NV_TEXTURE_ENV_COMBINE4, 0 },
    {"GL_NV_texture_shader", NV_TEXTURE_SHADER, 0 },
    {"GL_NV_texture_shader2", NV_TEXTURE_SHADER2, 0 },
    {"GL_NV_vertex_program", NV_VERTEX_PROGRAM, 0 },
    {"GL_NV_vertex_program1_1", NV_VERTEX_PROGRAM1_1, 0 },
    {"GL_NV_vertex_program2", NV_VERTEX_PROGRAM2, 0 },
    {"GL_NV_vertex_program2_option", NV_VERTEX_PROGRAM2_OPTION, 0 },
    {"GL_NV_vertex_program3", NV_VERTEX_PROGRAM3, 0 },

    /* SGI */
    {"GL_SGIS_generate_mipmap", SGIS_GENERATE_MIPMAP, 0 },
};

/**********************************************************
 * Utility functions follow
 **********************************************************/

const struct min_lookup minMipLookup[] =
{
    /* NONE POINT LINEAR */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
    {{GL_NEAREST, GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST_MIPMAP_LINEAR}}, /* POINT */
    {{GL_LINEAR, GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR_MIPMAP_LINEAR}}, /* LINEAR */
};

const struct min_lookup minMipLookup_noFilter[] =
{
    /* NONE POINT LINEAR */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* POINT */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* LINEAR */
};

const struct min_lookup minMipLookup_noMip[] =
{
    /* NONE POINT LINEAR */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* POINT */
    {{GL_LINEAR, GL_LINEAR, GL_LINEAR }}, /* LINEAR */
};

const GLenum magLookup[] =
{
    /* NONE POINT LINEAR */
    GL_NEAREST, GL_NEAREST, GL_LINEAR,
};

const GLenum magLookup_noFilter[] =
{
    /* NONE POINT LINEAR */
    GL_NEAREST, GL_NEAREST, GL_NEAREST,
};

/* drawStridedSlow attributes */
glAttribFunc position_funcs[WINED3D_FFP_EMIT_COUNT];
glAttribFunc diffuse_funcs[WINED3D_FFP_EMIT_COUNT];
glAttribFunc specular_func_3ubv;
glAttribFunc specular_funcs[WINED3D_FFP_EMIT_COUNT];
glAttribFunc normal_funcs[WINED3D_FFP_EMIT_COUNT];
glMultiTexCoordFunc multi_texcoord_funcs[WINED3D_FFP_EMIT_COUNT];

/**********************************************************
 * IWineD3D parts follow
 **********************************************************/

#ifndef VBOX_WITH_VMSVGA
/* GL locking is done by the caller */
static inline BOOL test_arb_vs_offset_limit(const struct wined3d_gl_info *gl_info)
{
    GLuint prog;
    BOOL ret = FALSE;
    const char *testcode =
        "!!ARBvp1.0\n"
        "PARAM C[66] = { program.env[0..65] };\n"
        "ADDRESS A0;"
        "PARAM zero = {0.0, 0.0, 0.0, 0.0};\n"
        "ARL A0.x, zero.x;\n"
        "MOV result.position, C[A0.x + 65];\n"
        "END\n";

    while(glGetError());
    GL_EXTCALL(glGenProgramsARB(1, &prog));
    if(!prog) {
        ERR("Failed to create an ARB offset limit test program\n");
    }
    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, prog));
    GL_EXTCALL(glProgramStringARB(GL_VERTEX_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
                                  (GLsizei)strlen(testcode), testcode));
    if(glGetError() != 0) {
        TRACE("OpenGL implementation does not allow indirect addressing offsets > 63\n");
        TRACE("error: %s\n", debugstr_a((const char *)glGetString(GL_PROGRAM_ERROR_STRING_ARB)));
        ret = TRUE;
    } else TRACE("OpenGL implementation allows offsets > 63\n");

    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, 0));
    GL_EXTCALL(glDeleteProgramsARB(1, &prog));
    checkGLcall("ARB vp offset limit test cleanup");

    return ret;
}
#endif

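/* Return the GL version listed for the given extension in EXTENSION_MAP (the core version that provides it), or 0 if none is recorded. */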
static DWORD ver_for_ext(GL_SupportedExt ext)
{
    unsigned int i;
    for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i) {
        if(EXTENSION_MAP[i].extension == ext) {
            return EXTENSION_MAP[i].version;
        }
    }
    return 0;
}

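/* Quirk detection: r300-r500 class ATI cards (Radeon 9500, X700, X1600). */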
static BOOL match_ati_r300_to_500(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor != HW_VENDOR_ATI) return FALSE;
    if (device == CARD_ATI_RADEON_9500) return TRUE;
    if (device == CARD_ATI_RADEON_X700) return TRUE;
    if (device == CARD_ATI_RADEON_X1600) return TRUE;
    return FALSE;
}

static BOOL match_geforce5(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor == HW_VENDOR_NVIDIA)
    {
        if (device == CARD_NVIDIA_GEFORCEFX_5800 || device == CARD_NVIDIA_GEFORCEFX_5600)
        {
            return TRUE;
        }
    }
    return FALSE;
}

static BOOL match_apple(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    /* MacOS has various specialities in the extensions it advertises. Some have to be loaded from
     * the opengl 1.2+ core, while other extensions are advertised, but software emulated. So try to
     * detect the Apple OpenGL implementation to apply some extension fixups afterwards.
     *
     * Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
     * aren't sufficient either because a Linux binary may display on a Mac OS X server via remote X11.
     * So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
     * like client storage might be supported on other implementations too, but GL_APPLE_flush_render
     * is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
     * the chance that other implementations support them is rather small since Win32 QuickTime uses
     * DirectDraw, not OpenGL.
     *
     * This test has been moved into wined3d_guess_gl_vendor().
     */
    if (gl_vendor == GL_VENDOR_APPLE)
    {
        return TRUE;
    }
    return FALSE;
}

/* Context activation is done by the caller. */
static void test_pbo_functionality(struct wined3d_gl_info *gl_info)
{
    /* Some OpenGL implementations, namely Apple's Geforce 8 driver, advertise PBOs,
     * but glTexSubImage from a PBO fails miserably, with the first line repeated across
     * the whole texture. This function detects this bug by its symptom and disables PBOs
     * if the test fails.
     *
     * The test uploads a 4x4 texture via the PBO in the "native" format GL_BGRA,
     * GL_UNSIGNED_INT_8_8_8_8_REV. This format triggers the bug, and it is what we use
     * for D3DFMT_A8R8G8B8. Then the texture is read back without any PBO and the data
     * read back is compared to the original. If they are equal PBOs are assumed to work,
     * otherwise the PBO extension is disabled. */
    GLuint texture, pbo;
    static const unsigned int pattern[] =
    {
        0x00000000, 0x000000ff, 0x0000ff00, 0x40ff0000,
        0x80ffffff, 0x40ffff00, 0x00ff00ff, 0x0000ffff,
        0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x000000ff,
        0x80ff00ff, 0x0000ffff, 0x00ff00ff, 0x40ff00ff
    };
    unsigned int check[sizeof(pattern) / sizeof(pattern[0])];

    /* No PBO -> No point in testing them. */
    if (!gl_info->supported[ARB_PIXEL_BUFFER_OBJECT]) return;

    ENTER_GL();

    while (glGetError());
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 4, 4, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 0);
    checkGLcall("Specifying the PBO test texture");

    GL_EXTCALL(glGenBuffersARB(1, &pbo));
    GL_EXTCALL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo));
    GL_EXTCALL(glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, sizeof(pattern), pattern, GL_STREAM_DRAW_ARB));
    checkGLcall("Specifying the PBO test pbo");

    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
    checkGLcall("Loading the PBO test texture");

    GL_EXTCALL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0));
#ifdef VBOX_WITH_VMSVGA
    glFinish();
#else
    wglFinish(); /* just to be sure */
#endif
    memset(check, 0, sizeof(check));
    glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, check);
    checkGLcall("Reading back the PBO test texture");

    glDeleteTextures(1, &texture);
    GL_EXTCALL(glDeleteBuffersARB(1, &pbo));
    checkGLcall("PBO test cleanup");

    LEAVE_GL();

    if (memcmp(check, pattern, sizeof(check)))
    {
        WARN_(d3d_caps)("PBO test failed, read back data doesn't match original.\n");
        WARN_(d3d_caps)("Disabling PBOs. This may result in slower performance.\n");
        gl_info->supported[ARB_PIXEL_BUFFER_OBJECT] = FALSE;
    }
    else
    {
        TRACE_(d3d_caps)("PBO test successful.\n");
    }
}

static BOOL match_apple_intel(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    return (card_vendor == HW_VENDOR_INTEL) && (gl_vendor == GL_VENDOR_APPLE);
}

static BOOL match_apple_nonr500ati(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (gl_vendor != GL_VENDOR_APPLE) return FALSE;
    if (card_vendor != HW_VENDOR_ATI) return FALSE;
    if (device == CARD_ATI_RADEON_X1600) return FALSE;
    return TRUE;
}

static BOOL match_fglrx(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    return gl_vendor == GL_VENDOR_FGLRX;
}

static BOOL match_dx10_capable(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    /* DX9 cards support 40 single float varyings in hardware, most drivers report 32. ATI misreports
     * 44 varyings. So assume that if we have more than 44 varyings we have a dx10 card.
     * This detection is for the gl_ClipPos varying quirk. If a d3d9 card really supports more than 44
     * varyings and we subtract one in dx9 shaders it's not going to hurt us because the dx9 limit is
     * hardcoded.
     *
     * dx10 cards usually have 64 varyings. */
    return gl_info->limits.glsl_varyings > 44;
}

/* A GL context is provided by the caller */
static BOOL match_allows_spec_alpha(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    GLenum error;
    DWORD data[16];

    if (!gl_info->supported[EXT_SECONDARY_COLOR]) return FALSE;

    ENTER_GL();
    while(glGetError());
    GL_EXTCALL(glSecondaryColorPointerEXT)(4, GL_UNSIGNED_BYTE, 4, data);
    error = glGetError();
    LEAVE_GL();

    if(error == GL_NO_ERROR)
    {
        TRACE("GL Implementation accepts 4 component specular color pointers\n");
        return TRUE;
    }
    else
    {
        TRACE("GL implementation does not accept 4 component specular colors, error %s\n",
              debug_glerror(error));
        return FALSE;
    }
}

static BOOL match_apple_nvts(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (!match_apple(gl_info, gl_renderer, gl_vendor, card_vendor, device)) return FALSE;
    return gl_info->supported[NV_TEXTURE_SHADER];
}

#ifndef VBOX_WITH_VMSVGA
/* A GL context is provided by the caller */
static BOOL match_broken_nv_clip(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    GLuint prog;
    BOOL ret = FALSE;
    GLint pos;
    const char *testcode =
        "!!ARBvp1.0\n"
        "OPTION NV_vertex_program2;\n"
        "MOV result.clip[0], 0.0;\n"
        "MOV result.position, 0.0;\n"
        "END\n";

    if (!gl_info->supported[NV_VERTEX_PROGRAM2_OPTION]) return FALSE;

    ENTER_GL();
    while(glGetError());

    GL_EXTCALL(glGenProgramsARB(1, &prog));
    if(!prog)
    {
        ERR("Failed to create the NVvp clip test program\n");
        LEAVE_GL();
        return FALSE;
    }
    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, prog));
    GL_EXTCALL(glProgramStringARB(GL_VERTEX_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
                                  (GLsizei)strlen(testcode), testcode));
    glGetIntegerv(GL_PROGRAM_ERROR_POSITION_ARB, &pos);
    if(pos != -1)
    {
        WARN("GL_NV_vertex_program2_option result.clip[] test failed\n");
        TRACE("error: %s\n", debugstr_a((const char *)glGetString(GL_PROGRAM_ERROR_STRING_ARB)));
        ret = TRUE;
        while(glGetError());
    }
    else TRACE("GL_NV_vertex_program2_option result.clip[] test passed\n");

    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, 0));
    GL_EXTCALL(glDeleteProgramsARB(1, &prog));
    checkGLcall("GL_NV_vertex_program2_option result.clip[] test cleanup");

    LEAVE_GL();
    return ret;
}
#endif

/* Context activation is done by the caller. */
static BOOL match_fbo_tex_update(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    char data[4 * 4 * 4];
    GLuint tex, fbo;
    GLenum status;

#ifndef VBOX_WITH_VMSVGA
    if (wined3d_settings.offscreen_rendering_mode != ORM_FBO) return FALSE;
#endif
    memset(data, 0xcc, sizeof(data));

    ENTER_GL();

    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 4, 4, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
    checkGLcall("glTexImage2D");

    gl_info->fbo_ops.glGenFramebuffers(1, &fbo);
    gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
    checkGLcall("glFramebufferTexture2D");

    status = gl_info->fbo_ops.glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) ERR("FBO status %#x\n", status);
    checkGLcall("glCheckFramebufferStatus");

    memset(data, 0x11, sizeof(data));
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
    checkGLcall("glTexSubImage2D");

    glClearColor(0.996, 0.729, 0.745, 0.792);
    glClear(GL_COLOR_BUFFER_BIT);
    checkGLcall("glClear");

    glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
    checkGLcall("glGetTexImage");

    gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
    gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
    checkGLcall("glBindTexture");

    gl_info->fbo_ops.glDeleteFramebuffers(1, &fbo);
    glDeleteTextures(1, &tex);
    checkGLcall("glDeleteTextures");

    LEAVE_GL();

    return *(DWORD *)data == 0x11111111;
}

static void quirk_arb_constants(struct wined3d_gl_info *gl_info)
{
    TRACE_(d3d_caps)("Using ARB vs constant limit(=%u) for GLSL.\n", gl_info->limits.arb_vs_native_constants);
    gl_info->limits.glsl_vs_float_constants = gl_info->limits.arb_vs_native_constants;
    TRACE_(d3d_caps)("Using ARB ps constant limit(=%u) for GLSL.\n", gl_info->limits.arb_ps_native_constants);
    gl_info->limits.glsl_ps_float_constants = gl_info->limits.arb_ps_native_constants;
}

static void quirk_apple_glsl_constants(struct wined3d_gl_info *gl_info)
{
    quirk_arb_constants(gl_info);
    /* MacOS needs uniforms for relative addressing offsets. This can accumulate to quite a few uniforms.
     * Beyond that the general uniform isn't optimal, so reserve a number of uniforms. 12 vec4's should
     * allow 48 different offsets or other helper immediate values. */
    TRACE_(d3d_caps)("Reserving 12 GLSL constants for compiler private use.\n");
    gl_info->reserved_glsl_constants = max(gl_info->reserved_glsl_constants, 12);
}

/* fglrx crashes with a very bad kernel panic if GL_POINT_SPRITE_ARB is set to GL_COORD_REPLACE_ARB
 * on more than one texture unit. This means that the d3d9 visual point size test will cause a
 * kernel panic on any machine running fglrx 9.3 (the latest driver that supports r300 to r500 cards).
 * This quirk only enables point sprites on the first texture unit. This keeps point sprites working in
 * most games, but avoids the crash.
 *
 * A more sophisticated way would be to find all units that need texture coordinates and enable
 * point sprites for one if only one is found, and software emulate point sprites in drawStridedSlow
 * if more than one unit needs texture coordinates (this requires software ffp and vertex shaders though).
 *
 * Note that disabling the extension entirely does not gain predictability because there is no point
 * sprite capability flag in d3d, so the potential rendering bugs are the same if we disable the extension. */
static void quirk_one_point_sprite(struct wined3d_gl_info *gl_info)
{
    if (gl_info->supported[ARB_POINT_SPRITE])
    {
        TRACE("Limiting point sprites to one texture unit.\n");
        gl_info->limits.point_sprite_units = 1;
    }
}

static void quirk_ati_dx9(struct wined3d_gl_info *gl_info)
{
    quirk_arb_constants(gl_info);

    /* MacOS advertises GL_ARB_texture_non_power_of_two on ATI r500 and earlier cards, although
     * these cards only support GL_ARB_texture_rectangle (D3DPTEXTURECAPS_NONPOW2CONDITIONAL).
     * If real NP2 textures are used, the driver falls back to software. We could just remove the
     * extension and use GL_ARB_texture_rectangle instead, but texture_rectangle is inconvenient
     * due to the non-normalized texture coordinates. Thus set an internal extension flag,
     * GL_WINE_normalized_texrect, which signals the code that it can use non power of two textures
     * as per GL_ARB_texture_non_power_of_two, but has to stick to the texture_rectangle limits.
     *
     * fglrx doesn't advertise GL_ARB_texture_non_power_of_two, but it advertises opengl 2.0 which
     * has this extension promoted to core. The extension loading code sets this extension supported
     * due to that, so this code works on fglrx as well. */
    if(gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO])
    {
        TRACE("GL_ARB_texture_non_power_of_two advertised on R500 or earlier card, removing.\n");
        gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = FALSE;
        gl_info->supported[WINE_NORMALIZED_TEXRECT] = TRUE;
    }

    /* fglrx has the same structural issues as the one described in quirk_apple_glsl_constants, although
     * it is generally more efficient. Reserve just 8 constants. */
    TRACE_(d3d_caps)("Reserving 8 GLSL constants for compiler private use.\n");
    gl_info->reserved_glsl_constants = max(gl_info->reserved_glsl_constants, 8);
}

static void quirk_no_np2(struct wined3d_gl_info *gl_info)
{
    /* The nVidia GeForceFX series reports OpenGL 2.0 capabilities with the latest driver versions, but
     * doesn't explicitly advertise the ARB_tex_npot extension in the GL extension string.
     * This usually means that ARB_tex_npot is supported in hardware as long as the application is staying
     * within the limits enforced by the ARB_texture_rectangle extension. This however is not true for the
     * FX series, which instantly falls back to a slower software path as soon as ARB_tex_npot is used.
     * We therefore completely remove ARB_tex_npot from the list of supported extensions.
     *
     * Note that wine_normalized_texrect can't be used in this case because internally it uses ARB_tex_npot,
     * triggering the software fallback. There is not much we can do here apart from disabling the
     * software-emulated extension and re-enabling ARB_tex_rect (which was previously disabled
     * in IWineD3DImpl_FillGLCaps).
     * This fixup removes performance problems on both the FX 5900 and FX 5700 (e.g. for framebuffer
     * post-processing effects in the game "Max Payne 2").
     * The behaviour can be verified through a simple test app attached in bugreport #14724. */
    TRACE("GL_ARB_texture_non_power_of_two advertised through OpenGL 2.0 on NV FX card, removing.\n");
    gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = FALSE;
    gl_info->supported[ARB_TEXTURE_RECTANGLE] = TRUE;
}

static void quirk_texcoord_w(struct wined3d_gl_info *gl_info)
{
    /* The Intel GPUs on MacOS set the .w register of texcoords to 0.0 by default, which causes problems
     * with fixed function fragment processing. Ideally this flag should be detected with a test shader
     * and OpenGL feedback mode, but some GL implementations (MacOS ATI at least, probably all MacOS ones)
     * do not like vertex shaders in feedback mode and return an error, even though it should be valid
     * according to the spec.
     *
     * We don't want to enable this on all cards, as it adds an extra instruction per texcoord used. This
     * makes the shader slower and eats instruction slots which should be available to the d3d app.
     *
     * ATI Radeon HD 2xxx cards on MacOS have the issue. Instead of checking for the buggy cards, blacklist
     * all radeon cards on Macs and whitelist the good ones. That way we're prepared for the future. If
     * this workaround is activated on cards that do not need it, it won't break things, just affect
     * performance negatively. */
    TRACE("Enabling vertex texture coord fixes in vertex shaders.\n");
    gl_info->quirks |= WINED3D_QUIRK_SET_TEXCOORD_W;
}

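/* Reserve a GLSL varying for clip position emulation; see the "Reserved varying for gl_ClipPos" quirk table entry below. */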
static void quirk_clip_varying(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_GLSL_CLIP_VARYING;
}

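/* Record that this GL implementation accepts 4-component specular colors (detected by match_allows_spec_alpha). */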
static void quirk_allows_specular_alpha(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_ALLOWS_SPECULAR_ALPHA;
}

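/* NV_texture_shader pixel formats are broken on OSX (see the quirk table entry), so hide both NVTS extensions. */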
static void quirk_apple_nvts(struct wined3d_gl_info *gl_info)
{
    gl_info->supported[NV_TEXTURE_SHADER] = FALSE;
    gl_info->supported[NV_TEXTURE_SHADER2] = FALSE;
}

#ifndef VBOX_WITH_VMSVGA
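/* Mark NV_vertex_program2_option result.clip[] support as broken (detected by match_broken_nv_clip). */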
static void quirk_disable_nvvp_clip(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_NV_CLIP_BROKEN;
}
#endif

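/* Request an FBO rebind whenever an attached texture is updated (detected by match_fbo_tex_update above). */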
static void quirk_fbo_tex_update(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_FBO_TEX_UPDATE;
}

static BOOL match_ati_hd4800(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor != HW_VENDOR_ATI) return FALSE;
    if (device == CARD_ATI_RADEON_HD4800) return TRUE;
    return FALSE;
}

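/* Enable the "Fullsize blit" quirk applied to Radeon HD 4800 cards (see the quirk table below). */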
static void quirk_fullsize_blit(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_FULLSIZE_BLIT;
}

#ifdef VBOX_WITH_WDDM
static BOOL match_mesa_nvidia(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor != HW_VENDOR_NVIDIA) return FALSE;
    if (gl_vendor != GL_VENDOR_MESA) return FALSE;
    return TRUE;
}

static void quirk_no_shader_3(struct wined3d_gl_info *gl_info)
{
    int vs_selected_mode, ps_selected_mode;
    select_shader_mode(gl_info, &ps_selected_mode, &vs_selected_mode);
    if (vs_selected_mode != SHADER_GLSL && ps_selected_mode != SHADER_GLSL)
        return;

    gl_info->limits.arb_ps_instructions = 512;
}
#endif

static BOOL match_intel(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor == HW_VENDOR_INTEL) return TRUE;
    if (gl_vendor == GL_VENDOR_INTEL) return TRUE;
    return FALSE;
}

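/* Set WINED3D_QUIRK_FORCE_BLIT, i.e. force a framebuffer blit when possible (see the quirk table entry below). */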
static void quirk_force_blit(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_FORCE_BLIT;
}

struct driver_quirk
{
    BOOL (*match)(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
            enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device);
    void (*apply)(struct wined3d_gl_info *gl_info);
    const char *description;
};

static const struct driver_quirk quirk_table[] =
{
    {
        match_ati_r300_to_500,
        quirk_ati_dx9,
        "ATI GLSL constant and normalized texrect quirk"
    },
    /* MacOS advertises more GLSL vertex shader uniforms than supported by the hardware, and if more are
     * used it falls back to software. While the compiler can detect if the shader uses all declared
     * uniforms, the optimization fails if the shader uses relative addressing. So any GLSL shader
     * using relative addressing falls back to software.
     *
     * ARB vp gives the correct amount of uniforms, so use it instead of GLSL. */
    {
        match_apple,
        quirk_apple_glsl_constants,
        "Apple GLSL uniform override"
    },
    {
        match_geforce5,
        quirk_no_np2,
        "Geforce 5 NP2 disable"
    },
    {
        match_apple_intel,
        quirk_texcoord_w,
        "Init texcoord .w for Apple Intel GPU driver"
    },
    {
        match_apple_nonr500ati,
        quirk_texcoord_w,
        "Init texcoord .w for Apple ATI >= r600 GPU driver"
    },
    {
        match_fglrx,
        quirk_one_point_sprite,
        "Fglrx point sprite crash workaround"
    },
    {
        match_dx10_capable,
        quirk_clip_varying,
        "Reserved varying for gl_ClipPos"
    },
    {
        /* GL_EXT_secondary_color does not allow 4 component secondary colors, but most
         * GL implementations accept it. The Mac GL is the only implementation known to
         * reject it.
         *
         * If we can pass 4 component specular colors, do it, because (a) we don't have
         * to screw around with the data, and (b) the D3D fixed function vertex pipeline
         * passes specular alpha to the pixel shader if any is used. Otherwise the
         * specular alpha is used to pass the fog coordinate, which we pass to opengl
         * via GL_EXT_fog_coord.
         */
        match_allows_spec_alpha,
        quirk_allows_specular_alpha,
        "Allow specular alpha quirk"
    },
    {
        /* The pixel formats provided by GL_NV_texture_shader are broken on OSX
         * (rdar://5682521).
         */
        match_apple_nvts,
        quirk_apple_nvts,
        "Apple NV_texture_shader disable"
    },
#ifndef VBOX_WITH_VMSVGA
    {
        match_broken_nv_clip,
        quirk_disable_nvvp_clip,
        "Apple NV_vertex_program clip bug quirk"
    },
#endif
    {
        match_fbo_tex_update,
        quirk_fbo_tex_update,
        "FBO rebind for attachment updates"
    },
    {
        match_ati_hd4800,
        quirk_fullsize_blit,
        "Fullsize blit"
    },
#ifdef VBOX_WITH_WDDM
    {
        match_mesa_nvidia,
        quirk_no_shader_3,
        "disable shader 3 support"
    },
#endif
    {
        match_intel,
        quirk_force_blit,
        "force framebuffer blit when possible"
    }
};

/* Context activation is done by the caller. */
static void fixup_extensions(struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    unsigned int i;

    for (i = 0; i < (sizeof(quirk_table) / sizeof(*quirk_table)); ++i)
    {
        if (!quirk_table[i].match(gl_info, gl_renderer, gl_vendor, card_vendor, device)) continue;
        TRACE_(d3d_caps)("Applying driver quirk \"%s\".\n", quirk_table[i].description);
        quirk_table[i].apply(gl_info);
    }

    /* Find out if PBOs work as they are supposed to. */
    test_pbo_functionality(gl_info);
}


/* Certain applications (Steam) complain if we report an outdated driver version. In general,
 * reporting a driver version is moot because we are not the Windows driver, and we have different
 * bugs, features, etc.
 *
 * The driver version has the form "x.y.z.w".
 *
 * "x" is the Windows version the driver is meant for:
 * 4 -> 95/98/NT4
 * 5 -> 2000
 * 6 -> 2000/XP
 * 7 -> Vista
 * 8 -> Win 7
 *
 * "y" is the Direct3D level the driver supports:
 * 11 -> d3d6
 * 12 -> d3d7
 * 13 -> d3d8
 * 14 -> d3d9
 * 15 -> d3d10
 *
 * "z" is unknown, possibly vendor specific.
 *
 * "w" is the vendor specific driver version.
 */
struct driver_version_information
{
    WORD vendor;                /* reported PCI card vendor ID */
    WORD card;                  /* reported PCI card device ID */
    const char *description;    /* Description of the card e.g. NVIDIA RIVA TNT */
    WORD d3d_level;             /* driver hiword to report */
    WORD lopart_hi, lopart_lo;  /* driver loword to report */
};

#if 0 /* VBox: unused */
static const struct driver_version_information driver_version_table[] =
{
    /* Nvidia drivers. Geforce6 and newer cards are supported by the current driver (180.x)
     * GeforceFX support is up to 173.x, - driver uses numbering x.y.11.7341 for 173.41 where x is the windows revision (6=2000/xp, 7=vista), y is unknown
     * Geforce2MX/3/4 up to 96.x - driver uses numbering 9.6.8.9 for 96.89
     * TNT/Geforce1/2 up to 71.x - driver uses numbering 7.1.8.6 for 71.86
     *
     * All version numbers used below are from the Linux nvidia drivers. */
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_RIVA_TNT, "NVIDIA RIVA TNT", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_RIVA_TNT2, "NVIDIA RIVA TNT2/TNT2 Pro", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE, "NVIDIA GeForce 256", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE2_MX, "NVIDIA GeForce2 MX/MX 400", 6, 4, 3 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE2, "NVIDIA GeForce2 GTS/GeForce2 Pro", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE3, "NVIDIA GeForce3", 6, 10, 9371 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE4_MX, "NVIDIA GeForce4 MX 460", 6, 10, 9371 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE4_TI4200, "NVIDIA GeForce4 Ti 4200", 6, 10, 9371 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5200, "NVIDIA GeForce FX 5200", 15, 11, 7516 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5600, "NVIDIA GeForce FX 5600", 15, 11, 7516 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5800, "NVIDIA GeForce FX 5800", 15, 11, 7516 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6200, "NVIDIA GeForce 6200", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6600GT, "NVIDIA GeForce 6600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6800, "NVIDIA GeForce 6800", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7300, "NVIDIA GeForce Go 7300", 15, 11, 8585 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7400, "NVIDIA GeForce Go 7400", 15, 11, 8585 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7600, "NVIDIA GeForce 7600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7800GT, "NVIDIA GeForce 7800 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8300GS, "NVIDIA GeForce 8300 GS", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8600GT, "NVIDIA GeForce 8600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8600MGT, "NVIDIA GeForce 8600M GT", 15, 11, 8585 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8800GTS, "NVIDIA GeForce 8800 GTS", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9200, "NVIDIA GeForce 9200", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9400GT, "NVIDIA GeForce 9400 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9500GT, "NVIDIA GeForce 9500 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9600GT, "NVIDIA GeForce 9600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9800GT, "NVIDIA GeForce 9800 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX260, "NVIDIA GeForce GTX 260", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX275, "NVIDIA GeForce GTX 275", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX280, "NVIDIA GeForce GTX 280", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT240, "NVIDIA GeForce GT 240", 15, 11, 8618 },

    /* ATI cards. The driver versions are somewhat similar, but not quite the same. Let's hardcode. */
    {HW_VENDOR_ATI, CARD_ATI_RADEON_9500, "ATI Radeon 9500", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_X700, "ATI Radeon X700 SE", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_X1600, "ATI Radeon X1600 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD2350, "ATI Mobility Radeon HD 2350", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD2600, "ATI Mobility Radeon HD 2600", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD2900, "ATI Radeon HD 2900 XT", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4350, "ATI Radeon HD 4350", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4600, "ATI Radeon HD 4600 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4700, "ATI Radeon HD 4700 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4800, "ATI Radeon HD 4800 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD5700, "ATI Radeon HD 5700 Series", 14, 10, 8681 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD5800, "ATI Radeon HD 5800 Series", 14, 10, 8681 },

    /* TODO: Add information about legacy ATI hardware, Intel and other cards. */
};
#endif /* VBox: unused */

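/* Parse the leading "major.minor" of a GL_VERSION string into a MAKEDWORD_VERSION value. */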
static DWORD wined3d_parse_gl_version(const char *gl_version)
{
    const char *ptr = gl_version;
    int major, minor;

    major = atoi(ptr);
    if (major <= 0) ERR_(d3d_caps)("Invalid opengl major version: %d.\n", major);

    while (isdigit(*ptr)) ++ptr;
    if (*ptr++ != '.') ERR_(d3d_caps)("Invalid opengl version string: %s.\n", debugstr_a(gl_version));

    minor = atoi(ptr);

    TRACE_(d3d_caps)("Found OpenGL version: %d.%d.\n", major, minor);

    return MAKEDWORD_VERSION(major, minor);
}

static enum wined3d_gl_vendor wined3d_guess_gl_vendor(struct wined3d_gl_info *gl_info, const char *gl_vendor_string, const char *gl_renderer)
{

    /* MacOS has various specialities in the extensions it advertises. Some have to be loaded from
     * the opengl 1.2+ core, while other extensions are advertised, but software emulated. So try to
     * detect the Apple OpenGL implementation to apply some extension fixups afterwards.
     *
     * Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
     * aren't sufficient either because a Linux binary may display on a Mac OS X server via remote X11.
     * So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
     * like client storage might be supported on other implementations too, but GL_APPLE_flush_render
     * is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
     * the chance that other implementations support them is rather small since Win32 QuickTime uses
     * DirectDraw, not OpenGL. */
    if (gl_info->supported[APPLE_FENCE]
            && gl_info->supported[APPLE_CLIENT_STORAGE]
            && gl_info->supported[APPLE_FLUSH_RENDER]
            && gl_info->supported[APPLE_YCBCR_422])
        return GL_VENDOR_APPLE;

    if (strstr(gl_vendor_string, "NVIDIA"))
        return GL_VENDOR_NVIDIA;

    if (strstr(gl_vendor_string, "ATI"))
        return GL_VENDOR_FGLRX;

    if (strstr(gl_vendor_string, "Intel(R)")
            || strstr(gl_renderer, "Intel(R)")
            || strstr(gl_vendor_string, "Intel Inc."))
    {
        if (strstr(gl_renderer, "Mesa"))
            return GL_VENDOR_MESA;
        return GL_VENDOR_INTEL;
    }

    if (strstr(gl_vendor_string, "Mesa")
            || strstr(gl_vendor_string, "Advanced Micro Devices, Inc.")
            || strstr(gl_vendor_string, "DRI R300 Project")
            || strstr(gl_vendor_string, "X.Org R300 Project")
            || strstr(gl_vendor_string, "Tungsten Graphics, Inc")
            || strstr(gl_vendor_string, "VMware, Inc.")
            || strstr(gl_renderer, "Mesa")
            || strstr(gl_renderer, "Gallium"))
        return GL_VENDOR_MESA;

    FIXME_(d3d_caps)("Received unrecognized GL_VENDOR %s. Returning GL_VENDOR_UNKNOWN.\n",
            debugstr_a(gl_vendor_string));

    return GL_VENDOR_UNKNOWN;
}

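/* Guess the PCI card vendor from the GL_VENDOR/GL_RENDERER strings; unrecognized vendors fall back to HW_VENDOR_NVIDIA. */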
static enum wined3d_pci_vendor wined3d_guess_card_vendor(const char *gl_vendor_string, const char *gl_renderer)
{
    if (strstr(gl_vendor_string, "NVIDIA"))
        return HW_VENDOR_NVIDIA;

    if (strstr(gl_vendor_string, "ATI")
            || strstr(gl_vendor_string, "Advanced Micro Devices, Inc.")
            || strstr(gl_vendor_string, "X.Org R300 Project")
            || strstr(gl_vendor_string, "DRI R300 Project"))
        return HW_VENDOR_ATI;

    if (strstr(gl_vendor_string, "Intel(R)")
            || strstr(gl_renderer, "Intel(R)")
            || strstr(gl_vendor_string, "Intel Inc."))
        return HW_VENDOR_INTEL;

    if (strstr(gl_vendor_string, "Mesa")
            || strstr(gl_vendor_string, "Tungsten Graphics, Inc")
            || strstr(gl_vendor_string, "VMware, Inc."))
        return HW_VENDOR_SOFTWARE;

    FIXME_(d3d_caps)("Received unrecognized GL_VENDOR %s. Returning HW_VENDOR_NVIDIA.\n", debugstr_a(gl_vendor_string));

    return HW_VENDOR_NVIDIA;
}

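/* Guess the card model and a rough video memory amount (in MB) for NVIDIA binary drivers from the GL_RENDERER string. */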
1071 | static enum wined3d_pci_device select_card_nvidia_binary(const struct wined3d_gl_info *gl_info,
|
---|
1072 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1073 | {
|
---|
1074 | #ifndef VBOX_WITH_WDDM
|
---|
1075 | if (WINE_D3D10_CAPABLE(gl_info))
|
---|
1076 | #endif
|
---|
1077 | {
|
---|
1078 | /* Geforce 200 - highend */
|
---|
1079 | if (strstr(gl_renderer, "GTX 280")
|
---|
1080 | || strstr(gl_renderer, "GTX 285")
|
---|
1081 | || strstr(gl_renderer, "GTX 295"))
|
---|
1082 | {
|
---|
1083 | *vidmem = 1024;
|
---|
1084 | return CARD_NVIDIA_GEFORCE_GTX280;
|
---|
1085 | }
|
---|
1086 |
|
---|
1087 | /* Geforce 200 - midend high */
|
---|
1088 | if (strstr(gl_renderer, "GTX 275"))
|
---|
1089 | {
|
---|
1090 | *vidmem = 896;
|
---|
1091 | return CARD_NVIDIA_GEFORCE_GTX275;
|
---|
1092 | }
|
---|
1093 |
|
---|
1094 | /* Geforce 200 - midend */
|
---|
1095 | if (strstr(gl_renderer, "GTX 260"))
|
---|
1096 | {
|
---|
1097 | *vidmem = 1024;
|
---|
1098 | return CARD_NVIDIA_GEFORCE_GTX260;
|
---|
1099 | }
|
---|
1100 | /* Geforce 200 - midend */
|
---|
1101 | if (strstr(gl_renderer, "GT 240"))
|
---|
1102 | {
|
---|
1103 | *vidmem = 512;
|
---|
1104 | return CARD_NVIDIA_GEFORCE_GT240;
|
---|
1105 | }
|
---|
1106 |
|
---|
1107 | /* Geforce9 - highend / Geforce 200 - midend (GTS 150/250 are based on the same core) */
|
---|
1108 | if (strstr(gl_renderer, "9800")
|
---|
1109 | || strstr(gl_renderer, "GTS 150")
|
---|
1110 | || strstr(gl_renderer, "GTS 250"))
|
---|
1111 | {
|
---|
1112 | *vidmem = 512;
|
---|
1113 | return CARD_NVIDIA_GEFORCE_9800GT;
|
---|
1114 | }
|
---|
1115 |
|
---|
1116 | /* Geforce9 - midend */
|
---|
1117 | if (strstr(gl_renderer, "9600"))
|
---|
1118 | {
|
---|
1119 | *vidmem = 384; /* The 9600GSO has 384MB, the 9600GT has 512-1024MB */
|
---|
1120 | return CARD_NVIDIA_GEFORCE_9600GT;
|
---|
1121 | }
|
---|
1122 |
|
---|
1123 | /* Geforce9 - midend low / Geforce 200 - low */
|
---|
1124 | if (strstr(gl_renderer, "9500")
|
---|
1125 | || strstr(gl_renderer, "GT 120")
|
---|
1126 | || strstr(gl_renderer, "GT 130"))
|
---|
1127 | {
|
---|
1128 | *vidmem = 256; /* The 9500GT has 256-1024MB */
|
---|
1129 | return CARD_NVIDIA_GEFORCE_9500GT;
|
---|
1130 | }
|
---|
1131 |
|
---|
1132 | /* Geforce9 - lowend */
|
---|
1133 | if (strstr(gl_renderer, "9400"))
|
---|
1134 | {
|
---|
1135 | *vidmem = 256; /* The 9400GT has 256-1024MB */
|
---|
1136 | return CARD_NVIDIA_GEFORCE_9400GT;
|
---|
1137 | }
|
---|
1138 |
|
---|
1139 | /* Geforce9 - lowend low */
|
---|
1140 | if (strstr(gl_renderer, "9100")
|
---|
1141 | || strstr(gl_renderer, "9200")
|
---|
1142 | || strstr(gl_renderer, "9300")
|
---|
1143 | || strstr(gl_renderer, "G 100"))
|
---|
1144 | {
|
---|
1145 | *vidmem = 256; /* The 9100-9300 cards have 256MB */
|
---|
1146 | return CARD_NVIDIA_GEFORCE_9200;
|
---|
1147 | }
|
---|
1148 |
|
---|
1149 | /* Geforce8 - highend */
|
---|
1150 | if (strstr(gl_renderer, "8800"))
|
---|
1151 | {
|
---|
1152 | *vidmem = 320; /* The 8800GTS uses 320MB, an 8800GTX can have 768MB */
|
---|
1153 | return CARD_NVIDIA_GEFORCE_8800GTS;
|
---|
1154 | }
|
---|
1155 |
|
---|
1156 | /* Geforce8 - midend mobile */
|
---|
1157 | if (strstr(gl_renderer, "8600 M"))
|
---|
1158 | {
|
---|
1159 | *vidmem = 512;
|
---|
1160 | return CARD_NVIDIA_GEFORCE_8600MGT;
|
---|
1161 | }
|
---|
1162 |
|
---|
1163 | /* Geforce8 - midend */
|
---|
1164 | if (strstr(gl_renderer, "8600")
|
---|
1165 | || strstr(gl_renderer, "8700"))
|
---|
1166 | {
|
---|
1167 | *vidmem = 256;
|
---|
1168 | return CARD_NVIDIA_GEFORCE_8600GT;
|
---|
1169 | }
|
---|
1170 |
|
---|
1171 | /* Geforce8 - lowend */
|
---|
1172 | if (strstr(gl_renderer, "8100")
|
---|
1173 | || strstr(gl_renderer, "8200")
|
---|
1174 | || strstr(gl_renderer, "8300")
|
---|
1175 | || strstr(gl_renderer, "8400")
|
---|
1176 | || strstr(gl_renderer, "8500"))
|
---|
1177 | {
|
---|
1178 | *vidmem = 128; /* 128-256MB for a 8300, 256-512MB for a 8400 */
|
---|
1179 | return CARD_NVIDIA_GEFORCE_8300GS;
|
---|
1180 | }
|
---|
1181 |
|
---|
1182 | /* Geforce8-compatible fallback if the GPU is not in the list yet */
|
---|
1183 | *vidmem = 128;
|
---|
1184 | return CARD_NVIDIA_GEFORCE_8300GS;
|
---|
1185 | }
|
---|
1186 |
|
---|
1187 | /* The GeforceFX, 6xxx and 7xxx series all support D3D9. The last two types have more
|
---|
1188 | * shader capabilities, so we use the shader capabilities to distinguish between FX and 6xxx/7xxx.
|
---|
1189 | */
|
---|
1190 | if (WINE_D3D9_CAPABLE(gl_info) && gl_info->supported[NV_VERTEX_PROGRAM3])
|
---|
1191 | {
|
---|
1192 | /* Geforce7 - highend */
|
---|
1193 | if (strstr(gl_renderer, "7800")
|
---|
1194 | || strstr(gl_renderer, "7900")
|
---|
1195 | || strstr(gl_renderer, "7950")
|
---|
1196 | || strstr(gl_renderer, "Quadro FX 4")
|
---|
1197 | || strstr(gl_renderer, "Quadro FX 5"))
|
---|
1198 | {
|
---|
1199 | *vidmem = 256; /* A 7800GT uses 256MB while highend 7900 cards can use 512MB */
|
---|
1200 | return CARD_NVIDIA_GEFORCE_7800GT;
|
---|
1201 | }
|
---|
1202 |
|
---|
1203 | /* Geforce7 midend */
|
---|
1204 | if (strstr(gl_renderer, "7600")
|
---|
1205 | || strstr(gl_renderer, "7700"))
|
---|
1206 | {
|
---|
1207 | *vidmem = 256; /* The 7600 uses 256-512MB */
|
---|
1208 | return CARD_NVIDIA_GEFORCE_7600;
|
---|
1209 | }
|
---|
1210 |
|
---|
1211 | /* Geforce7 lower medium */
|
---|
1212 | if (strstr(gl_renderer, "7400"))
|
---|
1213 | {
|
---|
1214 | *vidmem = 256; /* The 7400 uses 256-512MB */
|
---|
1215 | return CARD_NVIDIA_GEFORCE_7400;
|
---|
1216 | }
|
---|
1217 |
|
---|
1218 | /* Geforce7 lowend */
|
---|
1219 | if (strstr(gl_renderer, "7300"))
|
---|
1220 | {
|
---|
1221 | *vidmem = 256; /* Mac Pros with this card have 256 MB */
|
---|
1222 | return CARD_NVIDIA_GEFORCE_7300;
|
---|
1223 | }
|
---|
1224 |
|
---|
1225 | /* Geforce6 highend */
|
---|
1226 | if (strstr(gl_renderer, "6800"))
|
---|
1227 | {
|
---|
1228 | *vidmem = 128; /* The 6800 uses 128-256MB, the 7600 uses 256-512MB */
|
---|
1229 | return CARD_NVIDIA_GEFORCE_6800;
|
---|
1230 | }
|
---|
1231 |
|
---|
1232 | /* Geforce6 - midend */
|
---|
1233 | if (strstr(gl_renderer, "6600")
|
---|
1234 | || strstr(gl_renderer, "6610")
|
---|
1235 | || strstr(gl_renderer, "6700"))
|
---|
1236 | {
|
---|
1237 | *vidmem = 128; /* A 6600GT has 128-256MB */
|
---|
1238 | return CARD_NVIDIA_GEFORCE_6600GT;
|
---|
1239 | }
|
---|
1240 |
|
---|
1241 | /* Geforce6/7 lowend */
|
---|
1242 | *vidmem = 64;
|
---|
1243 | return CARD_NVIDIA_GEFORCE_6200; /* Geforce 6100/6150/6200/7300/7400/7500 */
|
---|
1244 | }
|
---|
1245 |
|
---|
1246 | if (WINE_D3D9_CAPABLE(gl_info))
|
---|
1247 | {
|
---|
1248 | /* GeforceFX - highend */
|
---|
1249 | if (strstr(gl_renderer, "5800")
|
---|
1250 | || strstr(gl_renderer, "5900")
|
---|
1251 | || strstr(gl_renderer, "5950")
|
---|
1252 | || strstr(gl_renderer, "Quadro FX"))
|
---|
1253 | {
|
---|
1254 | *vidmem = 256; /* 5800-5900 cards use 256MB */
|
---|
1255 | return CARD_NVIDIA_GEFORCEFX_5800;
|
---|
1256 | }
|
---|
1257 |
|
---|
1258 | /* GeforceFX - midend */
|
---|
1259 | if (strstr(gl_renderer, "5600")
|
---|
1260 | || strstr(gl_renderer, "5650")
|
---|
1261 | || strstr(gl_renderer, "5700")
|
---|
1262 | || strstr(gl_renderer, "5750"))
|
---|
1263 | {
|
---|
1264 | *vidmem = 128; /* A 5600 uses 128-256MB */
|
---|
1265 | return CARD_NVIDIA_GEFORCEFX_5600;
|
---|
1266 | }
|
---|
1267 |
|
---|
1268 | /* GeforceFX - lowend */
|
---|
1269 | *vidmem = 64; /* Normal FX5200 cards use 64-256MB; laptop (non-standard) models can have less */
|
---|
1270 | return CARD_NVIDIA_GEFORCEFX_5200; /* GeforceFX 5100/5200/5250/5300/5500 */
|
---|
1271 | }
|
---|
1272 |
|
---|
1273 | if (WINE_D3D8_CAPABLE(gl_info))
|
---|
1274 | {
|
---|
1275 | if (strstr(gl_renderer, "GeForce4 Ti") || strstr(gl_renderer, "Quadro4"))
|
---|
1276 | {
|
---|
1277 | *vidmem = 64; /* Geforce4 Ti cards have 64-128MB */
|
---|
1278 | return CARD_NVIDIA_GEFORCE4_TI4200; /* Geforce4 Ti4200/Ti4400/Ti4600/Ti4800, Quadro4 */
|
---|
1279 | }
|
---|
1280 |
|
---|
1281 | *vidmem = 64; /* Geforce3 cards have 64-128MB */
|
---|
1282 | return CARD_NVIDIA_GEFORCE3; /* Geforce3 standard/Ti200/Ti500, Quadro DCC */
|
---|
1283 | }
|
---|
1284 |
|
---|
1285 | if (WINE_D3D7_CAPABLE(gl_info))
|
---|
1286 | {
|
---|
1287 | if (strstr(gl_renderer, "GeForce4 MX"))
|
---|
1288 | {
|
---|
1289 | /* Geforce4MX GPUs typically have 64MB of memory; some
|
---|
1290 | * early models had 32MB, but most have 64MB or even 128MB. */
|
---|
1291 | *vidmem = 64;
|
---|
1292 | return CARD_NVIDIA_GEFORCE4_MX; /* MX420/MX440/MX460/MX4000 */
|
---|
1293 | }
|
---|
1294 |
|
---|
1295 | if (strstr(gl_renderer, "GeForce2 MX") || strstr(gl_renderer, "Quadro2 MXR"))
|
---|
1296 | {
|
---|
1297 | *vidmem = 32; /* Geforce2MX GPUs have 32-64MB of video memory */
|
---|
1298 | return CARD_NVIDIA_GEFORCE2_MX; /* Geforce2 standard/MX100/MX200/MX400, Quadro2 MXR */
|
---|
1299 | }
|
---|
1300 |
|
---|
1301 | if (strstr(gl_renderer, "GeForce2") || strstr(gl_renderer, "Quadro2"))
|
---|
1302 | {
|
---|
1303 | *vidmem = 32; /* Geforce2 GPUs have 32-64MB of video memory */
|
---|
1304 | return CARD_NVIDIA_GEFORCE2; /* Geforce2 GTS/Pro/Ti/Ultra, Quadro2 */
|
---|
1305 | }
|
---|
1306 |
|
---|
1307 | /* Most Geforce1 cards have 32MB; there are also some rare 16
|
---|
1308 | * and 64MB (Dell) models. */
|
---|
1309 | *vidmem = 32;
|
---|
1310 | return CARD_NVIDIA_GEFORCE; /* Geforce 256/DDR, Quadro */
|
---|
1311 | }
|
---|
1312 |
|
---|
1313 | if (strstr(gl_renderer, "TNT2"))
|
---|
1314 | {
|
---|
1315 | *vidmem = 32; /* Most TNT2 boards have 32MB, though there are 16MB boards too */
|
---|
1316 | return CARD_NVIDIA_RIVA_TNT2; /* Riva TNT2 standard/M64/Pro/Ultra */
|
---|
1317 | }
|
---|
1318 |
|
---|
1319 | *vidmem = 16; /* Most TNT boards have 16MB, some rare models have 8MB */
|
---|
1320 | return CARD_NVIDIA_RIVA_TNT; /* Riva TNT, Vanta */
|
---|
1321 |
|
---|
1322 | }
|
---|
1323 |
|
---|
1324 | static enum wined3d_pci_device select_card_ati_binary(const struct wined3d_gl_info *gl_info,
|
---|
1325 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1326 | {
|
---|
1327 | /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
|
---|
1328 | *
|
---|
1329 | * Beware: renderer strings do not match exact card models,
|
---|
1330 | * e.g. HD 4800 is returned for multiple cards, even for RV790-based ones. */
|
---|
1331 | #ifndef VBOX_WITH_WDDM
|
---|
1332 | if (WINE_D3D10_CAPABLE(gl_info))
|
---|
1333 | #endif
|
---|
1334 | {
|
---|
1335 | /* Radeon EG CYPRESS XT / PRO HD5800 - highend */
|
---|
1336 | if (strstr(gl_renderer, "HD 5800") /* Radeon EG CYPRESS HD58xx generic renderer string */
|
---|
1337 | || strstr(gl_renderer, "HD 5850") /* Radeon EG CYPRESS XT */
|
---|
1338 | || strstr(gl_renderer, "HD 5870")) /* Radeon EG CYPRESS PRO */
|
---|
1339 | {
|
---|
1340 | *vidmem = 1024; /* note: HD58xx cards use 1024MB */
|
---|
1341 | return CARD_ATI_RADEON_HD5800;
|
---|
1342 | }
|
---|
1343 |
|
---|
1344 | /* Radeon EG JUNIPER XT / LE HD5700 - midend */
|
---|
1345 | if (strstr(gl_renderer, "HD 5700") /* Radeon EG JUNIPER HD57xx generic renderer string */
|
---|
1346 | || strstr(gl_renderer, "HD 5750") /* Radeon EG JUNIPER LE */
|
---|
1347 | || strstr(gl_renderer, "HD 5770")) /* Radeon EG JUNIPER XT */
|
---|
1348 | {
|
---|
1349 | *vidmem = 512; /* note: HD5770 cards use 1024MB and HD5750 cards use 512MB or 1024MB */
|
---|
1350 | return CARD_ATI_RADEON_HD5700;
|
---|
1351 | }
|
---|
1352 |
|
---|
1353 | /* Radeon R7xx HD4800 - highend */
|
---|
1354 | if (strstr(gl_renderer, "HD 4800") /* Radeon RV7xx HD48xx generic renderer string */
|
---|
1355 | || strstr(gl_renderer, "HD 4830") /* Radeon RV770 */
|
---|
1356 | || strstr(gl_renderer, "HD 4850") /* Radeon RV770 */
|
---|
1357 | || strstr(gl_renderer, "HD 4870") /* Radeon RV770 */
|
---|
1358 | || strstr(gl_renderer, "HD 4890")) /* Radeon RV790 */
|
---|
1359 | {
|
---|
1360 | *vidmem = 512; /* note: HD4890 cards use 1024MB */
|
---|
1361 | return CARD_ATI_RADEON_HD4800;
|
---|
1362 | }
|
---|
1363 |
|
---|
1364 | /* Radeon R740 HD4700 - midend */
|
---|
1365 | if (strstr(gl_renderer, "HD 4700") /* Radeon RV770 */
|
---|
1366 | || strstr(gl_renderer, "HD 4770")) /* Radeon RV740 */
|
---|
1367 | {
|
---|
1368 | *vidmem = 512;
|
---|
1369 | return CARD_ATI_RADEON_HD4700;
|
---|
1370 | }
|
---|
1371 |
|
---|
1372 | /* Radeon R730 HD4600 - midend */
|
---|
1373 | if (strstr(gl_renderer, "HD 4600") /* Radeon RV730 */
|
---|
1374 | || strstr(gl_renderer, "HD 4650") /* Radeon RV730 */
|
---|
1375 | || strstr(gl_renderer, "HD 4670")) /* Radeon RV730 */
|
---|
1376 | {
|
---|
1377 | *vidmem = 512;
|
---|
1378 | return CARD_ATI_RADEON_HD4600;
|
---|
1379 | }
|
---|
1380 |
|
---|
1381 | /* Radeon R710 HD4500/HD4350 - lowend */
|
---|
1382 | if (strstr(gl_renderer, "HD 4350") /* Radeon RV710 */
|
---|
1383 | || strstr(gl_renderer, "HD 4550")) /* Radeon RV710 */
|
---|
1384 | {
|
---|
1385 | *vidmem = 256;
|
---|
1386 | return CARD_ATI_RADEON_HD4350;
|
---|
1387 | }
|
---|
1388 |
|
---|
1389 | /* Radeon R6xx HD2900/HD3800 - highend */
|
---|
1390 | if (strstr(gl_renderer, "HD 2900")
|
---|
1391 | || strstr(gl_renderer, "HD 3870")
|
---|
1392 | || strstr(gl_renderer, "HD 3850"))
|
---|
1393 | {
|
---|
1394 | *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
|
---|
1395 | return CARD_ATI_RADEON_HD2900;
|
---|
1396 | }
|
---|
1397 |
|
---|
1398 | /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
|
---|
1399 | if (strstr(gl_renderer, "HD 2600")
|
---|
1400 | || strstr(gl_renderer, "HD 3830")
|
---|
1401 | || strstr(gl_renderer, "HD 3690")
|
---|
1402 | || strstr(gl_renderer, "HD 3650"))
|
---|
1403 | {
|
---|
1404 | *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
|
---|
1405 | return CARD_ATI_RADEON_HD2600;
|
---|
1406 | }
|
---|
1407 |
|
---|
1408 | /* Radeon R6xx HD2350/HD2400/HD3400 - lowend
|
---|
1409 | * Note HD2300=DX9, HD2350=DX10 */
|
---|
1410 | if (strstr(gl_renderer, "HD 2350")
|
---|
1411 | || strstr(gl_renderer, "HD 2400")
|
---|
1412 | || strstr(gl_renderer, "HD 3470")
|
---|
1413 | || strstr(gl_renderer, "HD 3450")
|
---|
1414 | || strstr(gl_renderer, "HD 3430")
|
---|
1415 | || strstr(gl_renderer, "HD 3400"))
|
---|
1416 | {
|
---|
1417 | *vidmem = 256; /* HD2350/2400 use 256MB, HD34xx use 256-512MB */
|
---|
1418 | return CARD_ATI_RADEON_HD2350;
|
---|
1419 | }
|
---|
1420 |
|
---|
1421 | /* Radeon R6xx/R7xx integrated */
|
---|
1422 | if (strstr(gl_renderer, "HD 3100")
|
---|
1423 | || strstr(gl_renderer, "HD 3200")
|
---|
1424 | || strstr(gl_renderer, "HD 3300"))
|
---|
1425 | {
|
---|
1426 | *vidmem = 128; /* 128MB */
|
---|
1427 | return CARD_ATI_RADEON_HD3200;
|
---|
1428 | }
|
---|
1429 |
|
---|
1430 | /* Default for when no GPU has been found */
|
---|
1431 | *vidmem = 128; /* 128MB */
|
---|
1432 | return CARD_ATI_RADEON_HD3200;
|
---|
1433 | }
|
---|
1434 |
|
---|
1435 | if (WINE_D3D9_CAPABLE(gl_info))
|
---|
1436 | {
|
---|
1437 | /* Radeon R5xx */
|
---|
1438 | if (strstr(gl_renderer, "X1600")
|
---|
1439 | || strstr(gl_renderer, "X1650")
|
---|
1440 | || strstr(gl_renderer, "X1800")
|
---|
1441 | || strstr(gl_renderer, "X1900")
|
---|
1442 | || strstr(gl_renderer, "X1950"))
|
---|
1443 | {
|
---|
1444 | *vidmem = 128; /* X1600 uses 128-256MB, >=X1800 uses 256MB */
|
---|
1445 | return CARD_ATI_RADEON_X1600;
|
---|
1446 | }
|
---|
1447 |
|
---|
1448 | /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300/X2500/HD2300 (lowend R5xx)
|
---|
1449 | * Note X2300/X2500/HD2300 are R5xx GPUs with a 2xxx naming but they are still DX9-only */
|
---|
1450 | if (strstr(gl_renderer, "X700")
|
---|
1451 | || strstr(gl_renderer, "X800")
|
---|
1452 | || strstr(gl_renderer, "X850")
|
---|
1453 | || strstr(gl_renderer, "X1300")
|
---|
1454 | || strstr(gl_renderer, "X1400")
|
---|
1455 | || strstr(gl_renderer, "X1450")
|
---|
1456 | || strstr(gl_renderer, "X1550")
|
---|
1457 | || strstr(gl_renderer, "X2300")
|
---|
1458 | || strstr(gl_renderer, "X2500")
|
---|
1459 | || strstr(gl_renderer, "HD 2300")
|
---|
1460 | )
|
---|
1461 | {
|
---|
1462 | *vidmem = 128; /* x700/x8*0 use 128-256MB, >=x1300 128-512MB */
|
---|
1463 | return CARD_ATI_RADEON_X700;
|
---|
1464 | }
|
---|
1465 |
|
---|
1466 | /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
|
---|
1467 | if (strstr(gl_renderer, "Radeon Xpress"))
|
---|
1468 | {
|
---|
1469 | *vidmem = 64; /* Shared RAM, BIOS configurable, 64-256M */
|
---|
1470 | return CARD_ATI_RADEON_XPRESS_200M;
|
---|
1471 | }
|
---|
1472 |
|
---|
1473 | /* Radeon R3xx */
|
---|
1474 | *vidmem = 64; /* Radeon 9500 uses 64MB, higher models use up to 256MB */
|
---|
1475 | return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
|
---|
1476 | }
|
---|
1477 |
|
---|
1478 | if (WINE_D3D8_CAPABLE(gl_info))
|
---|
1479 | {
|
---|
1480 | *vidmem = 64; /* 8500/9000 cards use mostly 64MB, though there are 32MB and 128MB models */
|
---|
1481 | return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
|
---|
1482 | }
|
---|
1483 |
|
---|
1484 | if (WINE_D3D7_CAPABLE(gl_info))
|
---|
1485 | {
|
---|
1486 | *vidmem = 32; /* There are models with up to 64MB */
|
---|
1487 | return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
|
---|
1488 | }
|
---|
1489 |
|
---|
1490 | *vidmem = 16; /* There are 16-32MB models */
|
---|
1491 | return CARD_ATI_RAGE_128PRO;
|
---|
1492 |
|
---|
1493 | }
|
---|
1494 |
|
---|
1495 | static enum wined3d_pci_device select_card_intel_binary(const struct wined3d_gl_info *gl_info,
|
---|
1496 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1497 | {
|
---|
1498 | if (strstr(gl_renderer, "X3100"))
|
---|
1499 | {
|
---|
1500 | /* MacOS calls the card GMA X3100; Google findings also suggest the name GM965 */
|
---|
1501 | *vidmem = 128;
|
---|
1502 | return CARD_INTEL_X3100;
|
---|
1503 | }
|
---|
1504 |
|
---|
1505 | if (strstr(gl_renderer, "GMA 950") || strstr(gl_renderer, "945GM"))
|
---|
1506 | {
|
---|
1507 | /* MacOS calls the card GMA 950, but everywhere else the PCI ID is named 945GM */
|
---|
1508 | *vidmem = 64;
|
---|
1509 | return CARD_INTEL_I945GM;
|
---|
1510 | }
|
---|
1511 |
|
---|
1512 | if (strstr(gl_renderer, "915GM")) return CARD_INTEL_I915GM;
|
---|
1513 | if (strstr(gl_renderer, "915G")) return CARD_INTEL_I915G;
|
---|
1514 | if (strstr(gl_renderer, "865G")) return CARD_INTEL_I865G;
|
---|
1515 | if (strstr(gl_renderer, "855G")) return CARD_INTEL_I855G;
|
---|
1516 | if (strstr(gl_renderer, "830G")) return CARD_INTEL_I830G;
|
---|
1517 | return CARD_INTEL_I915G;
|
---|
1518 |
|
---|
1519 | }
|
---|
1520 |
|
---|
1521 | static enum wined3d_pci_device select_card_ati_mesa(const struct wined3d_gl_info *gl_info,
|
---|
1522 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1523 | {
|
---|
1524 | /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
|
---|
1525 | *
|
---|
1526 | * Beware: renderer strings do not match exact card models,
|
---|
1527 | * e.g. HD 4800 is returned for multiple cards, even for RV790-based ones. */
|
---|
1528 | if (strstr(gl_renderer, "Gallium"))
|
---|
1529 | {
|
---|
1530 | /* Radeon R7xx HD4800 - highend */
|
---|
1531 | if (strstr(gl_renderer, "R700") /* Radeon R7xx HD48xx generic renderer string */
|
---|
1532 | || strstr(gl_renderer, "RV770") /* Radeon RV770 */
|
---|
1533 | || strstr(gl_renderer, "RV790")) /* Radeon RV790 */
|
---|
1534 | {
|
---|
1535 | *vidmem = 512; /* note: HD4890 cards use 1024MB */
|
---|
1536 | return CARD_ATI_RADEON_HD4800;
|
---|
1537 | }
|
---|
1538 |
|
---|
1539 | /* Radeon R740 HD4700 - midend */
|
---|
1540 | if (strstr(gl_renderer, "RV740")) /* Radeon RV740 */
|
---|
1541 | {
|
---|
1542 | *vidmem = 512;
|
---|
1543 | return CARD_ATI_RADEON_HD4700;
|
---|
1544 | }
|
---|
1545 |
|
---|
1546 | /* Radeon R730 HD4600 - midend */
|
---|
1547 | if (strstr(gl_renderer, "RV730")) /* Radeon RV730 */
|
---|
1548 | {
|
---|
1549 | *vidmem = 512;
|
---|
1550 | return CARD_ATI_RADEON_HD4600;
|
---|
1551 | }
|
---|
1552 |
|
---|
1553 | /* Radeon R710 HD4500/HD4350 - lowend */
|
---|
1554 | if (strstr(gl_renderer, "RV710")) /* Radeon RV710 */
|
---|
1555 | {
|
---|
1556 | *vidmem = 256;
|
---|
1557 | return CARD_ATI_RADEON_HD4350;
|
---|
1558 | }
|
---|
1559 |
|
---|
1560 | /* Radeon R6xx HD2900/HD3800 - highend */
|
---|
1561 | if (strstr(gl_renderer, "R600")
|
---|
1562 | || strstr(gl_renderer, "RV670")
|
---|
1563 | || strstr(gl_renderer, "R680"))
|
---|
1564 | {
|
---|
1565 | *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
|
---|
1566 | return CARD_ATI_RADEON_HD2900;
|
---|
1567 | }
|
---|
1568 |
|
---|
1569 | /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
|
---|
1570 | if (strstr(gl_renderer, "RV630")
|
---|
1571 | || strstr(gl_renderer, "RV635"))
|
---|
1572 | {
|
---|
1573 | *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
|
---|
1574 | return CARD_ATI_RADEON_HD2600;
|
---|
1575 | }
|
---|
1576 |
|
---|
1577 | /* Radeon R6xx HD2350/HD2400/HD3400 - lowend */
|
---|
1578 | if (strstr(gl_renderer, "RV610")
|
---|
1579 | || strstr(gl_renderer, "RV620"))
|
---|
1580 | {
|
---|
1581 | *vidmem = 256; /* HD2350/2400 use 256MB, HD34xx use 256-512MB */
|
---|
1582 | return CARD_ATI_RADEON_HD2350;
|
---|
1583 | }
|
---|
1584 |
|
---|
1585 | /* Radeon R6xx/R7xx integrated */
|
---|
1586 | if (strstr(gl_renderer, "RS780")
|
---|
1587 | || strstr(gl_renderer, "RS880"))
|
---|
1588 | {
|
---|
1589 | *vidmem = 128; /* 128MB */
|
---|
1590 | return CARD_ATI_RADEON_HD3200;
|
---|
1591 | }
|
---|
1592 |
|
---|
1593 | /* Radeon R5xx */
|
---|
1594 | if (strstr(gl_renderer, "RV530")
|
---|
1595 | || strstr(gl_renderer, "RV535")
|
---|
1596 | || strstr(gl_renderer, "RV560")
|
---|
1597 | || strstr(gl_renderer, "R520")
|
---|
1598 | || strstr(gl_renderer, "RV570")
|
---|
1599 | || strstr(gl_renderer, "R580"))
|
---|
1600 | {
|
---|
1601 | *vidmem = 128; /* X1600 uses 128-256MB, >=X1800 uses 256MB */
|
---|
1602 | return CARD_ATI_RADEON_X1600;
|
---|
1603 | }
|
---|
1604 |
|
---|
1605 | /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300 (lowend R5xx) */
|
---|
1606 | if (strstr(gl_renderer, "R410")
|
---|
1607 | || strstr(gl_renderer, "R420")
|
---|
1608 | || strstr(gl_renderer, "R423")
|
---|
1609 | || strstr(gl_renderer, "R430")
|
---|
1610 | || strstr(gl_renderer, "R480")
|
---|
1611 | || strstr(gl_renderer, "R481")
|
---|
1612 | || strstr(gl_renderer, "RV410")
|
---|
1613 | || strstr(gl_renderer, "RV515")
|
---|
1614 | || strstr(gl_renderer, "RV516"))
|
---|
1615 | {
|
---|
1616 | *vidmem = 128; /* x700/x8*0 use 128-256MB, >=x1300 128-512MB */
|
---|
1617 | return CARD_ATI_RADEON_X700;
|
---|
1618 | }
|
---|
1619 |
|
---|
1620 | /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
|
---|
1621 | if (strstr(gl_renderer, "RS400")
|
---|
1622 | || strstr(gl_renderer, "RS480")
|
---|
1623 | || strstr(gl_renderer, "RS482")
|
---|
1624 | || strstr(gl_renderer, "RS485")
|
---|
1625 | || strstr(gl_renderer, "RS600")
|
---|
1626 | || strstr(gl_renderer, "RS690")
|
---|
1627 | || strstr(gl_renderer, "RS740"))
|
---|
1628 | {
|
---|
1629 | *vidmem = 64; /* Shared RAM, BIOS configurable, 64-256M */
|
---|
1630 | return CARD_ATI_RADEON_XPRESS_200M;
|
---|
1631 | }
|
---|
1632 |
|
---|
1633 | /* Radeon R3xx */
|
---|
1634 | if (strstr(gl_renderer, "R300")
|
---|
1635 | || strstr(gl_renderer, "RV350")
|
---|
1636 | || strstr(gl_renderer, "RV351")
|
---|
1637 | || strstr(gl_renderer, "RV360")
|
---|
1638 | || strstr(gl_renderer, "RV370")
|
---|
1639 | || strstr(gl_renderer, "R350")
|
---|
1640 | || strstr(gl_renderer, "R360"))
|
---|
1641 | {
|
---|
1642 | *vidmem = 64; /* Radeon 9500 uses 64MB, higher models use up to 256MB */
|
---|
1643 | return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
|
---|
1644 | }
|
---|
1645 | }
|
---|
1646 |
|
---|
1647 | if (WINE_D3D9_CAPABLE(gl_info))
|
---|
1648 | {
|
---|
1649 | /* Radeon R7xx HD4800 - highend */
|
---|
1650 | if (strstr(gl_renderer, "(R700") /* Radeon R7xx HD48xx generic renderer string */
|
---|
1651 | || strstr(gl_renderer, "(RV770") /* Radeon RV770 */
|
---|
1652 | || strstr(gl_renderer, "(RV790")) /* Radeon RV790 */
|
---|
1653 | {
|
---|
1654 | *vidmem = 512; /* note: HD4890 cards use 1024MB */
|
---|
1655 | return CARD_ATI_RADEON_HD4800;
|
---|
1656 | }
|
---|
1657 |
|
---|
1658 | /* Radeon R740 HD4700 - midend */
|
---|
1659 | if (strstr(gl_renderer, "(RV740")) /* Radeon RV740 */
|
---|
1660 | {
|
---|
1661 | *vidmem = 512;
|
---|
1662 | return CARD_ATI_RADEON_HD4700;
|
---|
1663 | }
|
---|
1664 |
|
---|
1665 | /* Radeon R730 HD4600 - midend */
|
---|
1666 | if (strstr(gl_renderer, "(RV730")) /* Radeon RV730 */
|
---|
1667 | {
|
---|
1668 | *vidmem = 512;
|
---|
1669 | return CARD_ATI_RADEON_HD4600;
|
---|
1670 | }
|
---|
1671 |
|
---|
1672 | /* Radeon R710 HD4500/HD4350 - lowend */
|
---|
1673 | if (strstr(gl_renderer, "(RV710")) /* Radeon RV710 */
|
---|
1674 | {
|
---|
1675 | *vidmem = 256;
|
---|
1676 | return CARD_ATI_RADEON_HD4350;
|
---|
1677 | }
|
---|
1678 |
|
---|
1679 | /* Radeon R6xx HD2900/HD3800 - highend */
|
---|
1680 | if (strstr(gl_renderer, "(R600")
|
---|
1681 | || strstr(gl_renderer, "(RV670")
|
---|
1682 | || strstr(gl_renderer, "(R680"))
|
---|
1683 | {
|
---|
1684 | *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
|
---|
1685 | return CARD_ATI_RADEON_HD2900;
|
---|
1686 | }
|
---|
1687 |
|
---|
1688 | /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
|
---|
1689 | if (strstr(gl_renderer, "(RV630")
|
---|
1690 | || strstr(gl_renderer, "(RV635"))
|
---|
1691 | {
|
---|
1692 | *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
|
---|
1693 | return CARD_ATI_RADEON_HD2600;
|
---|
1694 | }
|
---|
1695 |
|
---|
1696 | /* Radeon R6xx HD2300/HD2400/HD3400 - lowend */
|
---|
1697 | if (strstr(gl_renderer, "(RV610")
|
---|
1698 | || strstr(gl_renderer, "(RV620"))
|
---|
1699 | {
|
---|
1700 | *vidmem = 256; /* HD2350/2400 use 256MB, HD34xx use 256-512MB */
|
---|
1701 | return CARD_ATI_RADEON_HD2350;
|
---|
1702 | }
|
---|
1703 |
|
---|
1704 | /* Radeon R6xx/R7xx integrated */
|
---|
1705 | if (strstr(gl_renderer, "(RS780")
|
---|
1706 | || strstr(gl_renderer, "(RS880"))
|
---|
1707 | {
|
---|
1708 | *vidmem = 128; /* 128MB */
|
---|
1709 | return CARD_ATI_RADEON_HD3200;
|
---|
1710 | }
|
---|
1711 | }
|
---|
1712 |
|
---|
1713 | if (WINE_D3D8_CAPABLE(gl_info))
|
---|
1714 | {
|
---|
1715 | *vidmem = 64; /* 8500/9000 cards use mostly 64MB, though there are 32MB and 128MB models */
|
---|
1716 | return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
|
---|
1717 | }
|
---|
1718 |
|
---|
1719 | if (WINE_D3D7_CAPABLE(gl_info))
|
---|
1720 | {
|
---|
1721 | *vidmem = 32; /* There are models with up to 64MB */
|
---|
1722 | return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
|
---|
1723 | }
|
---|
1724 |
|
---|
1725 | *vidmem = 16; /* There are 16-32MB models */
|
---|
1726 | return CARD_ATI_RAGE_128PRO;
|
---|
1727 |
|
---|
1728 | }
|
---|
1729 |
|
---|
1730 | static enum wined3d_pci_device select_card_nvidia_mesa(const struct wined3d_gl_info *gl_info,
|
---|
1731 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1732 | {
|
---|
1733 | FIXME_(d3d_caps)("Card selection not handled for Mesa Nouveau driver\n");
|
---|
1734 | #ifndef VBOX_WITH_WDDM
|
---|
1735 | if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCEFX_5600;
|
---|
1736 | #else
|
---|
1737 | /* Temporary workaround to disable the quirk_no_np2 quirk for Mesa drivers. */
|
---|
1738 | if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE_6200;
|
---|
1739 | #endif
|
---|
1740 | if (WINE_D3D8_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE3;
|
---|
1741 | if (WINE_D3D7_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE;
|
---|
1742 | if (WINE_D3D6_CAPABLE(gl_info)) return CARD_NVIDIA_RIVA_TNT;
|
---|
1743 | return CARD_NVIDIA_RIVA_128;
|
---|
1744 | }
|
---|
1745 |
|
---|
1746 | static enum wined3d_pci_device select_card_intel_cmn(const struct wined3d_gl_info *gl_info,
|
---|
1747 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1748 | {
|
---|
1749 | if (strstr(gl_renderer, "HD Graphics")
|
---|
1750 | || strstr(gl_renderer, "Sandybridge"))
|
---|
1751 | return CARD_INTEL_SBHD;
|
---|
1752 | FIXME_(d3d_caps)("Card selection not handled for Windows Intel driver\n");
|
---|
1753 | return CARD_INTEL_I915G;
|
---|
1754 | }
|
---|
1755 |
|
---|
1756 | static enum wined3d_pci_device select_card_intel_mesa(const struct wined3d_gl_info *gl_info,
|
---|
1757 | const char *gl_renderer, unsigned int *vidmem)
|
---|
1758 | {
|
---|
1759 | return select_card_intel_cmn(gl_info, gl_renderer, vidmem);
|
---|
1760 | }
|
---|
1761 |
|
---|
1762 | struct vendor_card_selection
|
---|
1763 | {
|
---|
1764 | enum wined3d_gl_vendor gl_vendor;
|
---|
1765 | enum wined3d_pci_vendor card_vendor;
|
---|
1766 | const char *description; /* Description of the card selector, e.g. "Apple OSX Intel binary driver" */
|
---|
1767 | enum wined3d_pci_device (*select_card)(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
|
---|
1768 | unsigned int *vidmem );
|
---|
1769 | };
|
---|
1770 |
|
---|
1771 | static const struct vendor_card_selection vendor_card_select_table[] =
|
---|
1772 | {
|
---|
1773 | {GL_VENDOR_NVIDIA, HW_VENDOR_NVIDIA, "Nvidia binary driver", select_card_nvidia_binary},
|
---|
1774 | {GL_VENDOR_APPLE, HW_VENDOR_NVIDIA, "Apple OSX NVidia binary driver", select_card_nvidia_binary},
|
---|
1775 | {GL_VENDOR_APPLE, HW_VENDOR_ATI, "Apple OSX AMD/ATI binary driver", select_card_ati_binary},
|
---|
1776 | {GL_VENDOR_APPLE, HW_VENDOR_INTEL, "Apple OSX Intel binary driver", select_card_intel_binary},
|
---|
1777 | {GL_VENDOR_FGLRX, HW_VENDOR_ATI, "AMD/ATI binary driver", select_card_ati_binary},
|
---|
1778 | {GL_VENDOR_MESA, HW_VENDOR_ATI, "Mesa AMD/ATI driver", select_card_ati_mesa},
|
---|
1779 | {GL_VENDOR_MESA, HW_VENDOR_NVIDIA, "Mesa Nouveau driver", select_card_nvidia_mesa},
|
---|
1780 | {GL_VENDOR_MESA, HW_VENDOR_INTEL, "Mesa Intel driver", select_card_intel_mesa},
|
---|
1781 | {GL_VENDOR_INTEL, HW_VENDOR_INTEL, "Windows Intel binary driver", select_card_intel_cmn}
|
---|
1782 | };
|
---|
1783 |
|
---|
1784 |
|
---|
1785 | static enum wined3d_pci_device wined3d_guess_card(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
|
---|
1786 | enum wined3d_gl_vendor *gl_vendor, enum wined3d_pci_vendor *card_vendor, unsigned int *vidmem)
|
---|
1787 | {
|
---|
1788 | /* Above is a list of Nvidia and ATI GPUs. Both vendors have dozens of
|
---|
1789 | * different GPUs with roughly the same features. In most cases GPUs from a
|
---|
1790 | * certain family differ in clock speeds, the amount of video memory and the
|
---|
1791 | * number of shader pipelines.
|
---|
1792 | *
|
---|
1793 | * A Direct3D device object contains the PCI id (vendor + device) of the
|
---|
1794 | * videocard which is used for rendering. Various applications use this
|
---|
1795 | * information to get a rough estimation of the features of the card and
|
---|
1796 | * some might use it for enabling 3d effects only on certain types of
|
---|
1797 | * videocards. In some cases games might even use it to work around bugs
|
---|
1798 | * which happen on certain videocards/driver combinations. The problem is
|
---|
1799 | * that OpenGL only exposes a rendering string containing the name of the
|
---|
1800 | * videocard and not the PCI id.
|
---|
1801 | *
|
---|
1802 | * Various games depend on the PCI id, so somehow we need to provide one.
|
---|
1803 | * A simple option is to parse the renderer string and translate this to
|
---|
1804 | * the right PCI id. This is a lot of work because there are more than 200
|
---|
1805 | * GPUs just for Nvidia. Various cards share the same renderer string, so
|
---|
1806 | * the amount of code might be 'small' but there are quite a number of
|
---|
1807 | * exceptions which would make this a pain to maintain. Another way would
|
---|
1808 | * be to query the PCI id from the operating system (assuming this is the
|
---|
1809 | * videocard which is used for rendering, which is not always the case).
|
---|
1810 | * This would work but it is not very portable. Second, it would not work
|
---|
1811 | * well in, let's say, a remote X situation in which the amount of 3d
|
---|
1812 | * features which can be used is limited.
|
---|
1813 | *
|
---|
1814 | * As said most games only use the PCI id to get an indication of the
|
---|
1815 | * capabilities of the card. It doesn't really matter if the given id is
|
---|
1816 | * the correct one if we return the id of a card with similar 3d features.
|
---|
1817 | *
|
---|
1818 | * The code below checks the OpenGL capabilities of a videocard and matches
|
---|
1819 | * that to a certain level of Direct3D functionality. Once a card passes
|
---|
1820 | * the Direct3D9 check, we know that the card (in case of Nvidia) is at
|
---|
1821 | * least a GeforceFX. To give a better estimate we do a basic check on the
|
---|
1822 | * renderer string but if that won't pass we return a default card. This
|
---|
1823 | * way is better than maintaining a full card database as even without a
|
---|
1824 | * full database we can return a card with similar features. Second the
|
---|
1825 | * size of the database can be made quite small because when you know what
|
---|
1826 | * type of 3d functionality a card has, you know to which GPU family the
|
---|
1827 | * GPU must belong. Because of this you only have to check a small part of
|
---|
1828 | * the renderer string to distinguish between different models from that
|
---|
1829 | * family.
|
---|
1830 | *
|
---|
1831 | * The code also selects a default amount of video memory which we will
|
---|
1832 | * use for an estimation of the amount of free texture memory. In case of
|
---|
1833 | * real D3D the amount of texture memory includes video memory and system
|
---|
1834 | * memory (to be specific AGP memory or in case of PCIE TurboCache /
|
---|
1835 | * HyperMemory). We don't know how much system memory can be addressed by
|
---|
1836 | * the system but we can make a reasonable estimation about the amount of
|
---|
1837 | * video memory. If the value is slightly wrong it doesn't matter as we
|
---|
1838 | * didn't include AGP-like memory which makes the amount of addressable
|
---|
1839 | * memory higher and, second, OpenGL isn't that strict: it moves data to system
|
---|
1840 | * memory behind our backs if really needed. Note that the amount of video
|
---|
1841 | * memory can be overruled using a registry setting. */
|
---|
1842 |
|
---|
1843 | #ifndef VBOX
|
---|
1844 | int i;
|
---|
1845 | #else
|
---|
1846 | size_t i;
|
---|
1847 | #endif
|
---|
1848 |
|
---|
1849 | for (i = 0; i < (sizeof(vendor_card_select_table) / sizeof(*vendor_card_select_table)); ++i)
|
---|
1850 | {
|
---|
1851 | if ((vendor_card_select_table[i].gl_vendor != *gl_vendor)
|
---|
1852 | || (vendor_card_select_table[i].card_vendor != *card_vendor))
|
---|
1853 | continue;
|
---|
1854 | TRACE_(d3d_caps)("Applying card_selector \"%s\".\n", vendor_card_select_table[i].description);
|
---|
1855 | return vendor_card_select_table[i].select_card(gl_info, gl_renderer, vidmem);
|
---|
1856 | }
|
---|
1857 |
|
---|
1858 | FIXME_(d3d_caps)("No card selector available for GL vendor %d and card vendor %04x.\n",
|
---|
1859 | *gl_vendor, *card_vendor);
|
---|
1860 |
|
---|
1861 | /* Default to generic Nvidia hardware based on the supported OpenGL extensions. Nvidia was
|
---|
1862 | * chosen because the hardware and drivers they make are of good quality, which makes them
|
---|
1863 | * a good generic choice. */
|
---|
1864 | *card_vendor = HW_VENDOR_NVIDIA;
|
---|
1865 | #ifndef VBOX_WITH_WDDM
|
---|
1866 | if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCEFX_5600;
|
---|
1867 | #else
|
---|
1868 | /* Temporary workaround to disable the quirk_no_np2 quirk for unrecognized drivers. */
|
---|
1869 | if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE_6200;
|
---|
1870 | #endif
|
---|
1871 |
|
---|
1872 | if (WINE_D3D8_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE3;
|
---|
1873 | if (WINE_D3D7_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE;
|
---|
1874 | if (WINE_D3D6_CAPABLE(gl_info)) return CARD_NVIDIA_RIVA_TNT;
|
---|
1875 | return CARD_NVIDIA_RIVA_128;
|
---|
1876 | }
|
---|
1877 |
|
---|
1878 | #ifndef VBOX_WITH_VMSVGA
|
---|
1879 | static const struct fragment_pipeline *select_fragment_implementation(struct wined3d_adapter *adapter)
|
---|
1880 | {
|
---|
1881 | const struct wined3d_gl_info *gl_info = &adapter->gl_info;
|
---|
1882 | int vs_selected_mode, ps_selected_mode;
|
---|
1883 |
|
---|
1884 | select_shader_mode(gl_info, &ps_selected_mode, &vs_selected_mode);
|
---|
1885 | if ((ps_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_GLSL)
|
---|
1886 | && gl_info->supported[ARB_FRAGMENT_PROGRAM]) return &arbfp_fragment_pipeline;
|
---|
1887 | else if (ps_selected_mode == SHADER_ATI) return &atifs_fragment_pipeline;
|
---|
1888 | else if (gl_info->supported[NV_REGISTER_COMBINERS]
|
---|
1889 | && gl_info->supported[NV_TEXTURE_SHADER2]) return &nvts_fragment_pipeline;
|
---|
1890 | else if (gl_info->supported[NV_REGISTER_COMBINERS]) return &nvrc_fragment_pipeline;
|
---|
1891 | else return &ffp_fragment_pipeline;
|
---|
1892 | }
|
---|
1893 | #endif
|
---|
1894 |
|
---|
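| /* Shader backend selection: prefer GLSL when either stage selected it, otherwise use ARB
|
---|
| * programs (non-VMSVGA builds only), otherwise the 'none' backend (no shader support). */
|
---|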
1895 | static const shader_backend_t *select_shader_backend(struct wined3d_adapter *adapter)
|
---|
1896 | {
|
---|
1897 | int vs_selected_mode, ps_selected_mode;
|
---|
1898 |
|
---|
1899 | select_shader_mode(&adapter->gl_info, &ps_selected_mode, &vs_selected_mode);
|
---|
1900 | if (vs_selected_mode == SHADER_GLSL || ps_selected_mode == SHADER_GLSL) return &glsl_shader_backend;
|
---|
1901 | #ifndef VBOX_WITH_VMSVGA
|
---|
1902 | if (vs_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_ARB) return &arb_program_shader_backend;
|
---|
1903 | #endif
|
---|
1904 | return &none_shader_backend;
|
---|
1905 | }
|
---|
1906 |
|
---|
1907 | #ifndef VBOX_WITH_VMSVGA
|
---|
1908 | static const struct blit_shader *select_blit_implementation(struct wined3d_adapter *adapter)
|
---|
1909 | {
|
---|
1910 | const struct wined3d_gl_info *gl_info = &adapter->gl_info;
|
---|
1911 | int vs_selected_mode, ps_selected_mode;
|
---|
1912 |
|
---|
1913 | select_shader_mode(gl_info, &ps_selected_mode, &vs_selected_mode);
|
---|
1914 | if ((ps_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_GLSL)
|
---|
1915 | && gl_info->supported[ARB_FRAGMENT_PROGRAM]) return &arbfp_blit;
|
---|
1916 | else return &ffp_blit;
|
---|
1917 | }
|
---|
1918 | #endif
|
---|
1919 |
|
---|
1920 | #ifdef VBOX_WITH_VMSVGA
|
---|
1921 | /** Checks if @a pszExtension is one of the extensions we're looking for and
|
---|
1922 | * updates @a pGlInfo->supported accordingly. */
|
---|
1923 | static void check_gl_extension(struct wined3d_gl_info *pGlInfo, const char *pszExtension)
|
---|
1924 | {
|
---|
1925 | size_t i;
|
---|
1926 | TRACE_(d3d_caps)("- %s\n", debugstr_a(pszExtension));
|
---|
1927 | for (i = 0; i < RT_ELEMENTS(EXTENSION_MAP); i++)
|
---|
1928 | if (!strcmp(pszExtension, EXTENSION_MAP[i].extension_string))
|
---|
1929 | {
|
---|
1930 | TRACE_(d3d_caps)(" FOUND: %s support.\n", EXTENSION_MAP[i].extension_string);
|
---|
1931 | pGlInfo->supported[EXTENSION_MAP[i].extension] = TRUE;
|
---|
1932 | return;
|
---|
1933 | }
|
---|
1934 | }
|
---|
1935 | #endif
|
---|
1936 |
|
---|
1937 | /* Context activation is done by the caller. */
|
---|
1938 | BOOL IWineD3DImpl_FillGLCaps(struct wined3d_adapter *adapter, struct VBOXVMSVGASHADERIF *pVBoxShaderIf)
|
---|
1939 | {
|
---|
1940 | #ifndef VBOX_WITH_VMSVGA
|
---|
1941 | struct wined3d_driver_info *driver_info = &adapter->driver_info;
|
---|
1942 | #endif
|
---|
1943 | struct wined3d_gl_info *gl_info = &adapter->gl_info;
|
---|
1944 | #ifndef VBOX_WITH_VMSVGA
|
---|
1945 | const char *GL_Extensions = NULL;
|
---|
1946 | const char *WGL_Extensions = NULL;
|
---|
1947 | #endif
|
---|
1948 | const char *gl_vendor_str, *gl_renderer_str, *gl_version_str;
|
---|
1949 | #ifndef VBOX_WITH_VMSVGA
|
---|
1950 | struct fragment_caps fragment_caps;
|
---|
1951 | #endif
|
---|
1952 | enum wined3d_gl_vendor gl_vendor;
|
---|
1953 | enum wined3d_pci_vendor card_vendor;
|
---|
1954 | enum wined3d_pci_device device;
|
---|
1955 | GLint gl_max;
|
---|
1956 | GLfloat gl_floatv[2];
|
---|
1957 | unsigned i;
|
---|
1958 | #ifndef VBOX_WITH_VMSVGA
|
---|
1959 | HDC hdc;
|
---|
1960 | #endif
|
---|
1961 | unsigned int vidmem=0;
|
---|
1962 | DWORD gl_version;
|
---|
1963 | #ifndef VBOX_WITH_VMSVGA
|
---|
1964 | size_t len;
|
---|
1965 | #endif
|
---|
1966 |
|
---|
1967 | TRACE_(d3d_caps)("(%p)\n", gl_info);
|
---|
1968 |
|
---|
1969 | ENTER_GL();
|
---|
1970 |
|
---|
1971 | VBOX_CHECK_GL_CALL(gl_renderer_str = (const char *)glGetString(GL_RENDERER));
|
---|
1972 | TRACE_(d3d_caps)("GL_RENDERER: %s.\n", debugstr_a(gl_renderer_str));
|
---|
1973 | if (!gl_renderer_str)
|
---|
1974 | {
|
---|
1975 | LEAVE_GL();
|
---|
1976 | ERR_(d3d_caps)("Received a NULL GL_RENDERER.\n");
|
---|
1977 | return FALSE;
|
---|
1978 | }
|
---|
1979 |
|
---|
1980 | VBOX_CHECK_GL_CALL(gl_vendor_str = (const char *)glGetString(GL_VENDOR));
|
---|
1981 | TRACE_(d3d_caps)("GL_VENDOR: %s.\n", debugstr_a(gl_vendor_str));
|
---|
1982 | if (!gl_vendor_str)
|
---|
1983 | {
|
---|
1984 | LEAVE_GL();
|
---|
1985 | ERR_(d3d_caps)("Received a NULL GL_VENDOR.\n");
|
---|
1986 | return FALSE;
|
---|
1987 | }
|
---|
1988 |
|
---|
1989 | /* Parse the GL_VERSION field into major and minor information */
|
---|
1990 | VBOX_CHECK_GL_CALL(gl_version_str = (const char *)glGetString(GL_VERSION));
|
---|
1991 | TRACE_(d3d_caps)("GL_VERSION: %s.\n", debugstr_a(gl_version_str));
|
---|
1992 | if (!gl_version_str)
|
---|
1993 | {
|
---|
1994 | LEAVE_GL();
|
---|
1995 | ERR_(d3d_caps)("Received a NULL GL_VERSION.\n");
|
---|
1996 | return FALSE;
|
---|
1997 | }
|
---|
1998 | gl_version = wined3d_parse_gl_version(gl_version_str);
|
---|
1999 |
|
---|
2000 | /*
|
---|
2001 | * Initialize OpenGL extension related variables
|
---|
2002 | * with default values
|
---|
2003 | */
|
---|
2004 | memset(gl_info->supported, 0, sizeof(gl_info->supported));
|
---|
2005 | gl_info->limits.blends = 1;
|
---|
2006 | gl_info->limits.buffers = 1;
|
---|
2007 | gl_info->limits.textures = 1;
|
---|
2008 | gl_info->limits.fragment_samplers = 1;
|
---|
2009 | gl_info->limits.vertex_samplers = 0;
|
---|
2010 | gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers + gl_info->limits.vertex_samplers;
|
---|
2011 | gl_info->limits.sampler_stages = 1;
|
---|
2012 | gl_info->limits.glsl_vs_float_constants = 0;
|
---|
2013 | gl_info->limits.glsl_ps_float_constants = 0;
|
---|
2014 | gl_info->limits.arb_vs_float_constants = 0;
|
---|
2015 | gl_info->limits.arb_vs_native_constants = 0;
|
---|
2016 | gl_info->limits.arb_vs_instructions = 0;
|
---|
2017 | gl_info->limits.arb_vs_temps = 0;
|
---|
2018 | gl_info->limits.arb_ps_float_constants = 0;
|
---|
2019 | gl_info->limits.arb_ps_local_constants = 0;
|
---|
2020 | gl_info->limits.arb_ps_instructions = 0;
|
---|
2021 | gl_info->limits.arb_ps_temps = 0;
|
---|
2022 |
|
---|
2023 | /* Retrieve OpenGL defaults */
|
---|
2024 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_CLIP_PLANES, &gl_max));
|
---|
2025 | gl_info->limits.clipplanes = min(WINED3DMAXUSERCLIPPLANES, gl_max);
|
---|
2026 | TRACE_(d3d_caps)("ClipPlanes support - num Planes=%d\n", gl_max);
|
---|
2027 |
|
---|
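| /* With dual OpenGL profiles, a query that fails on the current init profile is retried
|
---|
| * on the other profile, and the original profile is restored afterwards. */
|
---|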
2028 | #ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
|
---|
2029 | glGetIntegerv(GL_MAX_LIGHTS, &gl_max);
|
---|
2030 | if (glGetError() != GL_NO_ERROR)
|
---|
2031 | {
|
---|
2032 | pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
|
---|
2033 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_LIGHTS, &gl_max));
|
---|
2034 | pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
|
---|
2035 | }
|
---|
2036 | #else
|
---|
2037 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_LIGHTS, &gl_max));
|
---|
2038 | #endif
|
---|
2039 | gl_info->limits.lights = gl_max;
|
---|
2040 | TRACE_(d3d_caps)("Lights support - max lights=%d\n", gl_max);
|
---|
2041 |
|
---|
2042 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_max));
|
---|
2043 | gl_info->limits.texture_size = gl_max;
|
---|
2044 | TRACE_(d3d_caps)("Maximum texture size support - max texture size=%d\n", gl_max);
|
---|
2045 |
|
---|
2046 | #ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
|
---|
2047 | glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv);
|
---|
2048 | if (glGetError() != GL_NO_ERROR)
|
---|
2049 | {
|
---|
2050 | pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
|
---|
2051 | VBOX_CHECK_GL_CALL(glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv));
|
---|
2052 | if (glGetError() != GL_NO_ERROR)
|
---|
2053 | gl_floatv[0] = gl_floatv[1] = 1;
|
---|
2054 | pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
|
---|
2055 | }
|
---|
2056 | #else
|
---|
2057 | VBOX_CHECK_GL_CALL(glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv));
|
---|
2058 | #endif
|
---|
2059 | gl_info->limits.pointsize_min = gl_floatv[0];
|
---|
2060 | gl_info->limits.pointsize_max = gl_floatv[1];
|
---|
2061 | TRACE_(d3d_caps)("Maximum point size support - max point size=%f\n", gl_floatv[1]);
|
---|
2062 |
|
---|
2063 | /* Parse the supported GL features, in theory enabling parts of our code appropriately. */
|
---|
2064 | #ifndef VBOX_WITH_VMSVGA
|
---|
2065 | GL_Extensions = (const char *)glGetString(GL_EXTENSIONS);
|
---|
2066 | if (!GL_Extensions)
|
---|
2067 | {
|
---|
2068 | LEAVE_GL();
|
---|
2069 | ERR_(d3d_caps)("Received a NULL GL_EXTENSIONS.\n");
|
---|
2070 | return FALSE;
|
---|
2071 | }
|
---|
2072 |
|
---|
2073 | LEAVE_GL();
|
---|
2074 |
|
---|
2075 | TRACE_(d3d_caps)("GL_Extensions reported:\n");
|
---|
2076 | #endif
|
---|
2077 |
|
---|
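| /* WINED3D_GL_EXT_NONE is the "no extension required" placeholder, so it is always marked
|
---|
| * as supported; VBOX_SHARED_CONTEXTS is a VirtualBox-specific flag enabled unconditionally. */
|
---|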
2078 | gl_info->supported[WINED3D_GL_EXT_NONE] = TRUE;
|
---|
2079 |
|
---|
2080 | gl_info->supported[VBOX_SHARED_CONTEXTS] = TRUE;
|
---|
2081 |
|
---|
2082 | #ifdef VBOX_WITH_VMSVGA
|
---|
2083 | {
|
---|
2084 | void *pvEnumCtx = NULL;
|
---|
2085 | char szCurExt[256];
|
---|
2086 | while (pVBoxShaderIf->pfnGetNextExtension(pVBoxShaderIf, &pvEnumCtx, szCurExt, sizeof(szCurExt), false /*fOtherProfile*/))
|
---|
2087 | check_gl_extension(gl_info, szCurExt);
|
---|
2088 |
|
---|
2089 | /* The cheap way out. */
|
---|
2090 | pvEnumCtx = NULL;
|
---|
2091 | while (pVBoxShaderIf->pfnGetNextExtension(pVBoxShaderIf, &pvEnumCtx, szCurExt, sizeof(szCurExt), true /*fOtherProfile*/))
|
---|
2092 | check_gl_extension(gl_info, szCurExt);
|
---|
2093 | }
|
---|
2094 | #else /* !VBOX_WITH_VMSVGA */
|
---|
2095 | while (*GL_Extensions)
|
---|
2096 | {
|
---|
2097 | const char *start;
|
---|
2098 | char current_ext[256];
|
---|
2099 |
|
---|
2100 | while (isspace(*GL_Extensions)) ++GL_Extensions;
|
---|
2101 | start = GL_Extensions;
|
---|
2102 | while (!isspace(*GL_Extensions) && *GL_Extensions) ++GL_Extensions;
|
---|
2103 |
|
---|
2104 | len = GL_Extensions - start;
|
---|
2105 | if (!len || len >= sizeof(current_ext)) continue;
|
---|
2106 |
|
---|
2107 | memcpy(current_ext, start, len);
|
---|
2108 | current_ext[len] = '\0';
|
---|
2109 | TRACE_(d3d_caps)("- %s\n", debugstr_a(current_ext));
|
---|
2110 |
|
---|
2111 | for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i)
|
---|
2112 | {
|
---|
2113 | if (!strcmp(current_ext, EXTENSION_MAP[i].extension_string))
|
---|
2114 | {
|
---|
2115 | TRACE_(d3d_caps)(" FOUND: %s support.\n", EXTENSION_MAP[i].extension_string);
|
---|
2116 | gl_info->supported[EXTENSION_MAP[i].extension] = TRUE;
|
---|
2117 | break;
|
---|
2118 | }
|
---|
2119 | }
|
---|
2120 | }
|
---|
2121 | #endif /* !VBOX_WITH_VMSVGA */
|
---|
2122 |
|
---|
2123 | #ifdef VBOX_WITH_VMSVGA
|
---|
2124 | # ifdef RT_OS_WINDOWS
|
---|
2125 | # define OGLGETPROCADDRESS wglGetProcAddress
|
---|
2126 | # elif RT_OS_DARWIN
|
---|
2127 | # define OGLGETPROCADDRESS(x) MyNSGLGetProcAddress(x)
|
---|
2128 | # else
|
---|
2129 | extern void (*glXGetProcAddress(const GLubyte *procname))( void );
|
---|
2130 | # define OGLGETPROCADDRESS(x) glXGetProcAddress((const GLubyte *)x)
|
---|
2131 | # endif
|
---|
2132 | #endif
|
---|
2133 |
|
---|
2134 | /* Now work out what GL support this card really has */
|
---|
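| /* For each entry point: load the extension function when the extension is supported, fall
|
---|
| * back to the core function ('replace') when the core GL version provides it, and leave
|
---|
| * the pointer NULL otherwise. */
|
---|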
2135 | #define USE_GL_FUNC(type, pfn, ext, replace) \
|
---|
2136 | { \
|
---|
2137 | DWORD ver = ver_for_ext(ext); \
|
---|
2138 | if (gl_info->supported[ext]) gl_info->pfn = (type)OGLGETPROCADDRESS(#pfn); \
|
---|
2139 | else if (ver && ver <= gl_version) gl_info->pfn = (type)OGLGETPROCADDRESS(#replace); \
|
---|
2140 | else gl_info->pfn = NULL; \
|
---|
2141 | }
|
---|
2142 | GL_EXT_FUNCS_GEN;
|
---|
2143 | #undef USE_GL_FUNC
|
---|
2144 |
|
---|
2145 | #ifndef VBOX_WITH_VMSVGA
|
---|
2146 | #define USE_GL_FUNC(type, pfn, ext, replace) gl_info->pfn = (type)OGLGETPROCADDRESS(#pfn);
|
---|
2147 | WGL_EXT_FUNCS_GEN;
|
---|
2148 | #undef USE_GL_FUNC
|
---|
2149 | #endif
|
---|
2150 |
|
---|
2151 | ENTER_GL();
|
---|
2152 |
|
---|
2153 | /* Now mark all the extensions supported which are included in the opengl core version. Do this *after*
|
---|
2154 | * loading the functions, otherwise the code above will load the extension entry points instead of the
|
---|
2155 | * core functions, which may not work. */
|
---|
2156 | for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i)
|
---|
2157 | {
|
---|
2158 | if (!gl_info->supported[EXTENSION_MAP[i].extension]
|
---|
2159 | && EXTENSION_MAP[i].version <= gl_version && EXTENSION_MAP[i].version)
|
---|
2160 | {
|
---|
2161 | TRACE_(d3d_caps)(" GL CORE: %s support.\n", EXTENSION_MAP[i].extension_string);
|
---|
2162 | gl_info->supported[EXTENSION_MAP[i].extension] = TRUE;
|
---|
2163 | }
|
---|
2164 | }
|
---|
2165 |
|
---|
2166 | if (gl_info->supported[APPLE_FENCE])
|
---|
2167 | {
|
---|
2168 | /* GL_NV_fence and GL_APPLE_fence provide basically the same functionality.
|
---|
2169 | * The Apple extension interacts with some other Apple extensions. Disable the NV
|
---|
2170 | * extension if the Apple one is supported, to prevent confusion in other parts
|
---|
2171 | * of the code. */
|
---|
2172 | gl_info->supported[NV_FENCE] = FALSE;
|
---|
2173 | }
|
---|
2174 | if (gl_info->supported[APPLE_FLOAT_PIXELS])
|
---|
2175 | {
|
---|
2176 | /* GL_APPLE_float_pixels == GL_ARB_texture_float + GL_ARB_half_float_pixel
|
---|
2177 | *
|
---|
2178 | * The enums are the same:
|
---|
2179 | * GL_RGBA16F_ARB = GL_RGBA_FLOAT16_APPLE = 0x881A
|
---|
2180 | * GL_RGB16F_ARB = GL_RGB_FLOAT16_APPLE = 0x881B
|
---|
2181 | * GL_RGBA32F_ARB = GL_RGBA_FLOAT32_APPLE = 0x8814
|
---|
2182 | * GL_RGB32F_ARB = GL_RGB_FLOAT32_APPLE = 0x8815
|
---|
2183 | * GL_HALF_FLOAT_ARB = GL_HALF_APPLE = 0x140B
|
---|
2184 | */
|
---|
2185 | if (!gl_info->supported[ARB_TEXTURE_FLOAT])
|
---|
2186 | {
|
---|
2187 | TRACE_(d3d_caps)(" IMPLIED: GL_ARB_texture_float support (from GL_APPLE_float_pixels).\n");
|
---|
2188 | gl_info->supported[ARB_TEXTURE_FLOAT] = TRUE;
|
---|
2189 | }
|
---|
2190 | if (!gl_info->supported[ARB_HALF_FLOAT_PIXEL])
|
---|
2191 | {
|
---|
2192 | TRACE_(d3d_caps)(" IMPLIED: GL_ARB_half_float_pixel support (from GL_APPLE_float_pixels).\n");
|
---|
2193 | gl_info->supported[ARB_HALF_FLOAT_PIXEL] = TRUE;
|
---|
2194 | }
|
---|
2195 | }
|
---|
2196 | if (gl_info->supported[ARB_MAP_BUFFER_RANGE])
|
---|
2197 | {
|
---|
2198 | /* GL_ARB_map_buffer_range and GL_APPLE_flush_buffer_range provide the same
|
---|
2199 | * functionality. Prefer the ARB extension */
|
---|
2200 | gl_info->supported[APPLE_FLUSH_BUFFER_RANGE] = FALSE;
|
---|
2201 | }
|
---|
2202 | if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
|
---|
2203 | {
|
---|
2204 | TRACE_(d3d_caps)(" IMPLIED: NVIDIA (NV) Texture Gen Reflection support.\n");
|
---|
2205 | gl_info->supported[NV_TEXGEN_REFLECTION] = TRUE;
|
---|
2206 | }
|
---|
2207 | if (!gl_info->supported[ARB_DEPTH_CLAMP] && gl_info->supported[NV_DEPTH_CLAMP])
|
---|
2208 | {
|
---|
2209 | TRACE_(d3d_caps)(" IMPLIED: ARB_depth_clamp support (by NV_depth_clamp).\n");
|
---|
2210 | gl_info->supported[ARB_DEPTH_CLAMP] = TRUE;
|
---|
2211 | }
|
---|
2212 | if (!gl_info->supported[ARB_VERTEX_ARRAY_BGRA] && gl_info->supported[EXT_VERTEX_ARRAY_BGRA])
|
---|
2213 | {
|
---|
2214 | TRACE_(d3d_caps)(" IMPLIED: ARB_vertex_array_bgra support (by EXT_vertex_array_bgra).\n");
|
---|
2215 | gl_info->supported[ARB_VERTEX_ARRAY_BGRA] = TRUE;
|
---|
2216 | }
|
---|
2217 | if (gl_info->supported[NV_TEXTURE_SHADER2])
|
---|
2218 | {
|
---|
2219 | if (gl_info->supported[NV_REGISTER_COMBINERS])
|
---|
2220 | {
|
---|
2221 | /* Also disable ATI_FRAGMENT_SHADER if register combiners and texture_shader2
|
---|
2222 | * are supported. The nv extensions provide the same functionality as the
|
---|
2223 | * ATI one, and a bit more (signed pixel formats). */
|
---|
2224 | gl_info->supported[ATI_FRAGMENT_SHADER] = FALSE;
|
---|
2225 | }
|
---|
2226 | }
|
---|
2227 |
|
---|
2228 | if (gl_info->supported[NV_REGISTER_COMBINERS])
|
---|
2229 | {
|
---|
2230 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_GENERAL_COMBINERS_NV, &gl_max));
|
---|
2231 | gl_info->limits.general_combiners = gl_max;
|
---|
2232 | TRACE_(d3d_caps)("Max general combiners: %d.\n", gl_max);
|
---|
2233 | }
|
---|
2234 | if (gl_info->supported[ARB_DRAW_BUFFERS])
|
---|
2235 | {
|
---|
2236 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &gl_max));
|
---|
2237 | gl_info->limits.buffers = gl_max;
|
---|
2238 | TRACE_(d3d_caps)("Max draw buffers: %u.\n", gl_max);
|
---|
2239 | }
|
---|
2240 | if (gl_info->supported[ARB_MULTITEXTURE])
|
---|
2241 | {
|
---|
2242 | #ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
|
---|
2243 | glGetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &gl_max);
|
---|
2244 | if (glGetError() != GL_NO_ERROR)
|
---|
2245 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &gl_max));
|
---|
2246 | #else
|
---|
2247 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &gl_max));
|
---|
2248 | #endif
|
---|
2249 | gl_info->limits.textures = min(MAX_TEXTURES, gl_max);
|
---|
2250 | TRACE_(d3d_caps)("Max textures: %d.\n", gl_info->limits.textures);
|
---|
2251 |
|
---|
2252 | if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
|
---|
2253 | {
|
---|
2254 | GLint tmp;
|
---|
2255 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS_ARB, &tmp));
|
---|
2256 | gl_info->limits.fragment_samplers = min(MAX_FRAGMENT_SAMPLERS, tmp);
|
---|
2257 | }
|
---|
2258 | else
|
---|
2259 | {
|
---|
2260 | gl_info->limits.fragment_samplers = max(gl_info->limits.fragment_samplers, (UINT)gl_max);
|
---|
2261 | }
|
---|
2262 | TRACE_(d3d_caps)("Max fragment samplers: %d.\n", gl_info->limits.fragment_samplers);
|
---|
2263 |
|
---|
2264 | if (gl_info->supported[ARB_VERTEX_SHADER])
|
---|
2265 | {
|
---|
2266 | GLint tmp;
|
---|
2267 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB, &tmp));
|
---|
2268 | gl_info->limits.vertex_samplers = tmp;
|
---|
2269 | VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB, &tmp));
|
---|
2270 | gl_info->limits.combined_samplers = tmp;
|
---|
2271 |
|
---|
2272 | /* Loading GLSL sampler uniforms is much simpler if we can assume that the sampler setup
|
---|
2273 | * is known at shader link time. In a vertex shader + pixel shader combination this isn't
|
---|
2274 | * an issue because then the sampler setup only depends on the two shaders. If a pixel
|
---|
2275 | * shader is used with fixed function vertex processing we're fine too because fixed function
|
---|
2276 | * vertex processing doesn't use any samplers. If fixed function fragment processing is
|
---|
2277 | * used we have to make sure that all vertex sampler setups are valid together with all
|
---|
2278 | * possible fixed function fragment processing setups. This is true if vsamplers + MAX_TEXTURES
|
---|
2279 | * <= max_samplers. This is true on all d3d9 cards that support vtf (GeForce 6 and GeForce 7 cards).
|
---|
2280 | * dx9 radeon cards do not support vertex texture fetch. DX10 cards have 128 samplers, and
|
---|
2281 | * dx9 is limited to 8 fixed function texture stages and 4 vertex samplers. DX10 does not have
|
---|
2282 | * a fixed function pipeline anymore.
|
---|
2283 | *
|
---|
2284 | * So this is just a sanity check that our assumption holds true. If not, write a warning
|
---|
2285 | * and reduce the number of vertex samplers or probably disable vertex texture fetch. */
|
---|
2286 | if (gl_info->limits.vertex_samplers && gl_info->limits.combined_samplers < 12
|
---|
2287 | && MAX_TEXTURES + gl_info->limits.vertex_samplers > gl_info->limits.combined_samplers)
|
---|
2288 | {
|
---|
2289 | FIXME("OpenGL implementation supports %u vertex samplers and %u total samplers.\n",
|
---|
2290 | gl_info->limits.vertex_samplers, gl_info->limits.combined_samplers);
|
---|
2291 | FIXME("Expected vertex samplers + MAX_TEXTURES(=8) <= combined_samplers.\n");
|
---|
2292 | if (gl_info->limits.combined_samplers > MAX_TEXTURES)
|
---|
2293 | gl_info->limits.vertex_samplers = gl_info->limits.combined_samplers - MAX_TEXTURES;
|
---|
2294 | else
|
---|
2295 | gl_info->limits.vertex_samplers = 0;
|
---|
2296 | }
|
---|
2297 | }
|
---|
2298 | else
|
---|
2299 | {
|
---|
2300 | gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers;
|
---|
2301 | }
|
---|
2302 | TRACE_(d3d_caps)("Max vertex samplers: %u.\n", gl_info->limits.vertex_samplers);
|
---|
2303 | TRACE_(d3d_caps)("Max combined samplers: %u.\n", gl_info->limits.combined_samplers);
|
---|
2304 | }
|
---|
    if (gl_info->supported[ARB_VERTEX_BLEND])
    {
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max);
        if (glGetError() != GL_NO_ERROR)
        {
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
            VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max));
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
        }
#else
        VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max));
#endif
        gl_info->limits.blends = gl_max;
        TRACE_(d3d_caps)("Max blends: %u.\n", gl_info->limits.blends);
    }
    if (gl_info->supported[EXT_TEXTURE3D])
    {
        VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE_EXT, &gl_max));
        gl_info->limits.texture3d_size = gl_max;
        TRACE_(d3d_caps)("Max texture3D size: %d.\n", gl_info->limits.texture3d_size);
    }
    if (gl_info->supported[EXT_TEXTURE_FILTER_ANISOTROPIC])
    {
        VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_max));
        gl_info->limits.anisotropy = gl_max;
        TRACE_(d3d_caps)("Max anisotropy: %d.\n", gl_info->limits.anisotropy);
    }
    if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
    {
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max));
        if (glGetError() != GL_NO_ERROR)
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
#endif
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max)));
        gl_info->limits.arb_ps_float_constants = gl_max;
        TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM float constants: %d.\n", gl_info->limits.arb_ps_float_constants);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max)));
        gl_info->limits.arb_ps_native_constants = gl_max;
        TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native float constants: %d.\n",
                gl_info->limits.arb_ps_native_constants);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max)));
        gl_info->limits.arb_ps_temps = gl_max;
        TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_ps_temps);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max)));
        gl_info->limits.arb_ps_instructions = gl_max;
        TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native instructions: %d.\n", gl_info->limits.arb_ps_instructions);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB, &gl_max)));
        gl_info->limits.arb_ps_local_constants = gl_max;
        TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM local parameters: %d.\n", gl_info->limits.arb_ps_local_constants);
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
#endif
    }
    if (gl_info->supported[ARB_VERTEX_PROGRAM])
    {
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max));
        if (glGetError() != GL_NO_ERROR)
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
#endif
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max)));
        gl_info->limits.arb_vs_float_constants = gl_max;
        TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM float constants: %d.\n", gl_info->limits.arb_vs_float_constants);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max)));
        gl_info->limits.arb_vs_native_constants = gl_max;
        TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native float constants: %d.\n",
                gl_info->limits.arb_vs_native_constants);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max)));
        gl_info->limits.arb_vs_temps = gl_max;
        TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_vs_temps);
        VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max)));
        gl_info->limits.arb_vs_instructions = gl_max;
        TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native instructions: %d.\n", gl_info->limits.arb_vs_instructions);
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
#endif
#ifndef VBOX_WITH_VMSVGA
        if (test_arb_vs_offset_limit(gl_info)) gl_info->quirks |= WINED3D_QUIRK_ARB_VS_OFFSET_LIMIT;
#endif
    }
    if (gl_info->supported[ARB_VERTEX_SHADER])
    {
        VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB, &gl_max));
        gl_info->limits.glsl_vs_float_constants = gl_max / 4;
#ifdef VBOX_WITH_WDDM
        /* The " / 4" above comes from the fact that glsl_vs/ps_float_constants is used to size
         * vec4 uniform arrays: each array element has 4 components, so the number of vec4
         * constants is GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4. Win8 Aero does not work
         * properly when this constant is below 256, while the Intel drivers this problem was seen
         * with accept vec4 arrays larger than GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4,
         * so raise the limit here.
         * @todo: add logging
         * @todo: perhaps this should be moved to quirks? */
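        /* Worked example (hypothetical driver values): GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB == 1024
         * float components corresponds to 1024 / 4 == 256 vec4 constants, exactly the minimum Win8
         * Aero is said to require; a driver advertising only 512 components (128 vec4s) would take
         * the workaround path below. */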
        if (gl_info->limits.glsl_vs_float_constants < 256 && gl_max >= 256)
        {
            DWORD dwVersion = GetVersion();
            DWORD dwMajor = (DWORD)(LOBYTE(LOWORD(dwVersion)));
            DWORD dwMinor = (DWORD)(HIBYTE(LOWORD(dwVersion)));
            /* Temporary workaround for the Win8 Aero requirement of 256 constants. */
            if (dwMajor > 6 || dwMinor > 1)
            {
                gl_info->limits.glsl_vs_float_constants = 256;
            }
        }
#endif
        TRACE_(d3d_caps)("Max ARB_VERTEX_SHADER float constants: %u.\n", gl_info->limits.glsl_vs_float_constants);
    }
    if (gl_info->supported[ARB_FRAGMENT_SHADER])
    {
        VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB, &gl_max));
        gl_info->limits.glsl_ps_float_constants = gl_max / 4;
#ifdef VBOX_WITH_WDDM
        /* See the ARB_VERTEX_SHADER block above for why the " / 4" is applied and why Win8 Aero
         * needs at least 256 vec4 constants; the same reasoning and workaround apply to the
         * fragment shader limit.
         * @todo: add logging
         * @todo: perhaps this should be moved to quirks? */
        if (gl_info->limits.glsl_ps_float_constants < 256 && gl_max >= 256)
        {
            DWORD dwVersion = GetVersion();
            DWORD dwMajor = (DWORD)(LOBYTE(LOWORD(dwVersion)));
            DWORD dwMinor = (DWORD)(HIBYTE(LOWORD(dwVersion)));
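            /* For reference: GetVersion() packs the Windows version into its low word; Windows 7
             * reports 6.1 (dwMajor == 6, dwMinor == 1) and Windows 8 reports 6.2, so the check
             * below is intended to raise the limit only on Windows 8 or later. */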
            /* Temporary workaround for the Win8 Aero requirement of 256 constants. */
            if (dwMajor > 6 || dwMinor > 1)
            {
                gl_info->limits.glsl_ps_float_constants = 256;
            }
        }
#endif
        TRACE_(d3d_caps)("Max ARB_FRAGMENT_SHADER float constants: %u.\n", gl_info->limits.glsl_ps_float_constants);
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max);
        if (glGetError() != GL_NO_ERROR)
        {
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
            VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max));
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
        }
#else
        VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max));
#endif
        gl_info->limits.glsl_varyings = gl_max;
        TRACE_(d3d_caps)("Max GLSL varyings: %u (%u 4 component varyings).\n", gl_max, gl_max / 4);
    }
    if (gl_info->supported[ARB_SHADING_LANGUAGE_100])
    {
        const char *str = (const char *)glGetString(GL_SHADING_LANGUAGE_VERSION_ARB);
        unsigned int major, minor;

        TRACE_(d3d_caps)("GLSL version string: %s.\n", debugstr_a(str));

        /* The format of the GLSL version string is "major.minor[.release] [vendor info]". */
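        /* For example, a version string of "1.20 NVIDIA via Cg compiler" (hypothetical driver
         * output) parses to major = 1, minor = 20, giving glsl_version = MAKEDWORD_VERSION(1, 20). */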
        sscanf(str, "%u.%u", &major, &minor);
        gl_info->glsl_version = MAKEDWORD_VERSION(major, minor);
    }
    if (gl_info->supported[NV_LIGHT_MAX_EXPONENT])
    {
#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
        glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess);
        if (glGetError() != GL_NO_ERROR)
        {
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
            VBOX_CHECK_GL_CALL(glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess));
            pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
        }
#else
        VBOX_CHECK_GL_CALL(glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess));
#endif
    }
    else
    {
        gl_info->limits.shininess = 128.0f;
    }
    if (gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO])
    {
        /* If we have full NP2 texture support, disable
         * GL_ARB_texture_rectangle because we will never use it.
         * This saves a few redundant glDisable calls. */
        gl_info->supported[ARB_TEXTURE_RECTANGLE] = FALSE;
    }
    if (gl_info->supported[ATI_FRAGMENT_SHADER])
    {
        /* Disable NV_register_combiners and NV_texture_shader if ATI_fragment_shader is supported.
         * Generally the NV extensions are preferred over the ATI ones, and this extension is
         * disabled if register_combiners and texture_shader2 are both supported. So we reach
         * this place only if we have incomplete NV dxlevel 8 fragment processing support. */
        gl_info->supported[NV_REGISTER_COMBINERS] = FALSE;
        gl_info->supported[NV_REGISTER_COMBINERS2] = FALSE;
        gl_info->supported[NV_TEXTURE_SHADER] = FALSE;
        gl_info->supported[NV_TEXTURE_SHADER2] = FALSE;
    }
    if (gl_info->supported[NV_HALF_FLOAT])
    {
        /* GL_ARB_half_float_vertex is a subset of GL_NV_half_float. */
        gl_info->supported[ARB_HALF_FLOAT_VERTEX] = TRUE;
    }
    if (gl_info->supported[ARB_POINT_SPRITE])
    {
        gl_info->limits.point_sprite_units = gl_info->limits.textures;
    }
    else
    {
        gl_info->limits.point_sprite_units = 0;
    }
#ifndef VBOX_WITH_VMSVGA
    checkGLcall("extension detection");
#endif
    LEAVE_GL();

#ifndef VBOX_WITH_VMSVGA
    adapter->fragment_pipe = select_fragment_implementation(adapter);
#endif
    adapter->shader_backend = select_shader_backend(adapter);
#ifndef VBOX_WITH_VMSVGA
    adapter->blitter = select_blit_implementation(adapter);

    adapter->fragment_pipe->get_caps(gl_info, &fragment_caps);
    gl_info->limits.texture_stages = fragment_caps.MaxTextureBlendStages;
    TRACE_(d3d_caps)("Max texture stages: %u.\n", gl_info->limits.texture_stages);

    /* In some cases the number of texture stages can be larger than the number
     * of samplers. The GF4 for example can use only 2 samplers (no fragment
     * shaders), but 8 texture stages (register combiners). */
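    /* Illustrative numbers (the GF4 case from the comment above): with fragment_samplers == 2 and
     * texture_stages == 8, the max() below yields sampler_stages == 8, presumably so that per-stage
     * state can still be tracked for all 8 stages even though only 2 GL samplers exist. */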
    gl_info->limits.sampler_stages = max(gl_info->limits.fragment_samplers, gl_info->limits.texture_stages);
#endif

    if (gl_info->supported[ARB_FRAMEBUFFER_OBJECT])
    {
        gl_info->fbo_ops.glIsRenderbuffer = gl_info->glIsRenderbuffer;
        gl_info->fbo_ops.glBindRenderbuffer = gl_info->glBindRenderbuffer;
        gl_info->fbo_ops.glDeleteRenderbuffers = gl_info->glDeleteRenderbuffers;
        gl_info->fbo_ops.glGenRenderbuffers = gl_info->glGenRenderbuffers;
        gl_info->fbo_ops.glRenderbufferStorage = gl_info->glRenderbufferStorage;
        gl_info->fbo_ops.glRenderbufferStorageMultisample = gl_info->glRenderbufferStorageMultisample;
        gl_info->fbo_ops.glGetRenderbufferParameteriv = gl_info->glGetRenderbufferParameteriv;
        gl_info->fbo_ops.glIsFramebuffer = gl_info->glIsFramebuffer;
        gl_info->fbo_ops.glBindFramebuffer = gl_info->glBindFramebuffer;
        gl_info->fbo_ops.glDeleteFramebuffers = gl_info->glDeleteFramebuffers;
        gl_info->fbo_ops.glGenFramebuffers = gl_info->glGenFramebuffers;
        gl_info->fbo_ops.glCheckFramebufferStatus = gl_info->glCheckFramebufferStatus;
        gl_info->fbo_ops.glFramebufferTexture1D = gl_info->glFramebufferTexture1D;
        gl_info->fbo_ops.glFramebufferTexture2D = gl_info->glFramebufferTexture2D;
        gl_info->fbo_ops.glFramebufferTexture3D = gl_info->glFramebufferTexture3D;
        gl_info->fbo_ops.glFramebufferRenderbuffer = gl_info->glFramebufferRenderbuffer;
        gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv = gl_info->glGetFramebufferAttachmentParameteriv;
        gl_info->fbo_ops.glBlitFramebuffer = gl_info->glBlitFramebuffer;
        gl_info->fbo_ops.glGenerateMipmap = gl_info->glGenerateMipmap;
    }
    else
    {
        if (gl_info->supported[EXT_FRAMEBUFFER_OBJECT])
        {
            gl_info->fbo_ops.glIsRenderbuffer = gl_info->glIsRenderbufferEXT;
            gl_info->fbo_ops.glBindRenderbuffer = gl_info->glBindRenderbufferEXT;
            gl_info->fbo_ops.glDeleteRenderbuffers = gl_info->glDeleteRenderbuffersEXT;
            gl_info->fbo_ops.glGenRenderbuffers = gl_info->glGenRenderbuffersEXT;
            gl_info->fbo_ops.glRenderbufferStorage = gl_info->glRenderbufferStorageEXT;
            gl_info->fbo_ops.glGetRenderbufferParameteriv = gl_info->glGetRenderbufferParameterivEXT;
            gl_info->fbo_ops.glIsFramebuffer = gl_info->glIsFramebufferEXT;
            gl_info->fbo_ops.glBindFramebuffer = gl_info->glBindFramebufferEXT;
            gl_info->fbo_ops.glDeleteFramebuffers = gl_info->glDeleteFramebuffersEXT;
            gl_info->fbo_ops.glGenFramebuffers = gl_info->glGenFramebuffersEXT;
            gl_info->fbo_ops.glCheckFramebufferStatus = gl_info->glCheckFramebufferStatusEXT;
            gl_info->fbo_ops.glFramebufferTexture1D = gl_info->glFramebufferTexture1DEXT;
            gl_info->fbo_ops.glFramebufferTexture2D = gl_info->glFramebufferTexture2DEXT;
            gl_info->fbo_ops.glFramebufferTexture3D = gl_info->glFramebufferTexture3DEXT;
            gl_info->fbo_ops.glFramebufferRenderbuffer = gl_info->glFramebufferRenderbufferEXT;
            gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv = gl_info->glGetFramebufferAttachmentParameterivEXT;
            gl_info->fbo_ops.glGenerateMipmap = gl_info->glGenerateMipmapEXT;
        }
#ifndef VBOX_WITH_VMSVGA
        else if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
        {
            WARN_(d3d_caps)("Framebuffer objects not supported, falling back to backbuffer offscreen rendering mode.\n");
            wined3d_settings.offscreen_rendering_mode = ORM_BACKBUFFER;
        }
#endif
        if (gl_info->supported[EXT_FRAMEBUFFER_BLIT])
        {
            gl_info->fbo_ops.glBlitFramebuffer = gl_info->glBlitFramebufferEXT;
        }
        if (gl_info->supported[EXT_FRAMEBUFFER_MULTISAMPLE])
        {
            gl_info->fbo_ops.glRenderbufferStorageMultisample = gl_info->glRenderbufferStorageMultisampleEXT;
        }
    }

#ifndef VBOX_WITH_VMSVGA
    /* MRTs are currently only supported when FBOs are used. */
    if (wined3d_settings.offscreen_rendering_mode != ORM_FBO)
    {
        gl_info->limits.buffers = 1;
    }
#endif
    gl_vendor = wined3d_guess_gl_vendor(gl_info, gl_vendor_str, gl_renderer_str);
    card_vendor = wined3d_guess_card_vendor(gl_vendor_str, gl_renderer_str);
    TRACE_(d3d_caps)("found GL_VENDOR (%s)->(0x%04x/0x%04x)\n", debugstr_a(gl_vendor_str), gl_vendor, card_vendor);

    device = wined3d_guess_card(gl_info, gl_renderer_str, &gl_vendor, &card_vendor, &vidmem);
    TRACE_(d3d_caps)("FOUND (fake) card: 0x%x (vendor id), 0x%x (device id)\n", card_vendor, device);

    /* If we have an estimate, use it; otherwise default to 64MB. */
    if (vidmem)
        gl_info->vidmem = vidmem * 1024 * 1024; /* convert from MB to bytes */
    else
        gl_info->vidmem = WINE_DEFAULT_VIDMEM;

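    /* Build the lookup table that maps d3d texture address modes to GL wrap modes. A consumer
     * would presumably index it relative to WINED3DTADDRESS_WRAP, e.g. (illustrative only)
     * gl_info->wrap_lookup[WINED3DTADDRESS_CLAMP - WINED3DTADDRESS_WRAP] yields GL_CLAMP_TO_EDGE;
     * modes whose GL extension is missing fall back to GL_REPEAT. */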
    gl_info->wrap_lookup[WINED3DTADDRESS_WRAP - WINED3DTADDRESS_WRAP] = GL_REPEAT;
    gl_info->wrap_lookup[WINED3DTADDRESS_MIRROR - WINED3DTADDRESS_WRAP] =
            gl_info->supported[ARB_TEXTURE_MIRRORED_REPEAT] ? GL_MIRRORED_REPEAT_ARB : GL_REPEAT;
    gl_info->wrap_lookup[WINED3DTADDRESS_CLAMP - WINED3DTADDRESS_WRAP] = GL_CLAMP_TO_EDGE;
    gl_info->wrap_lookup[WINED3DTADDRESS_BORDER - WINED3DTADDRESS_WRAP] =
            gl_info->supported[ARB_TEXTURE_BORDER_CLAMP] ? GL_CLAMP_TO_BORDER_ARB : GL_REPEAT;
    gl_info->wrap_lookup[WINED3DTADDRESS_MIRRORONCE - WINED3DTADDRESS_WRAP] =
            gl_info->supported[ATI_TEXTURE_MIRROR_ONCE] ? GL_MIRROR_CLAMP_TO_EDGE_ATI : GL_REPEAT;

#ifndef VBOX_WITH_VMSVGA
    /* Make sure there's an active HDC, otherwise the WGL extension calls will fail. */
    hdc = pwglGetCurrentDC();
    if (hdc) {
        /* Not all GL drivers offer WGL extensions, e.g. VirtualBox. */
        if (GL_EXTCALL(wglGetExtensionsStringARB))
            WGL_Extensions = GL_EXTCALL(wglGetExtensionsStringARB(hdc));

        if (NULL == WGL_Extensions) {
            ERR("WGL_Extensions returns NULL\n");
        } else {
            TRACE_(d3d_caps)("WGL_Extensions reported:\n");
            while (*WGL_Extensions != 0x00) {
                const char *Start;
                char ThisExtn[256];

                while (isspace(*WGL_Extensions)) WGL_Extensions++;
                Start = WGL_Extensions;
                while (!isspace(*WGL_Extensions) && *WGL_Extensions != 0x00) {
                    WGL_Extensions++;
                }

                len = WGL_Extensions - Start;
                if (len == 0 || len >= sizeof(ThisExtn))
                    continue;

                memcpy(ThisExtn, Start, len);
                ThisExtn[len] = '\0';
                TRACE_(d3d_caps)("- %s\n", debugstr_a(ThisExtn));

                if (!strcmp(ThisExtn, "WGL_ARB_pixel_format")) {
                    gl_info->supported[WGL_ARB_PIXEL_FORMAT] = TRUE;
                    TRACE_(d3d_caps)("FOUND: WGL_ARB_pixel_format support\n");
                }
                if (!strcmp(ThisExtn, "WGL_WINE_pixel_format_passthrough")) {
                    gl_info->supported[WGL_WINE_PIXEL_FORMAT_PASSTHROUGH] = TRUE;
                    TRACE_(d3d_caps)("FOUND: WGL_WINE_pixel_format_passthrough support\n");
                }
            }
        }
    }
#endif

    fixup_extensions(gl_info, gl_renderer_str, gl_vendor, card_vendor, device);
#ifndef VBOX_WITH_VMSVGA
    init_driver_info(driver_info, card_vendor, device);
    add_gl_compat_wrappers(gl_info);
#endif

    return TRUE;
}
