On 26 July 2016 at 19:09, Patrick Rudolph <siro(a)das-labor.org> wrote:
+ static const struct
+ {
+ const char *flags_s;
+ unsigned int flags;
+ DWORD usage;
+ BOOL unsynchronized;
+ BOOL todo;
+ BOOL broken;
+ }
+ /* Nvidia ignores D3DLOCK_NOOVERWRITE on non-dynamic buffers,
+ * while AMD always accepts D3DLOCK_NOOVERWRITE on all buffers. */
+ tests[] =
+ {
+ {"0", 0, 0, FALSE, FALSE, FALSE},
+ {"0", 0, D3DUSAGE_DYNAMIC, FALSE, FALSE, FALSE},
+ {"D3DLOCK_NOOVERWRITE", D3DLOCK_NOOVERWRITE, 0, FALSE, FALSE, TRUE},
+ {"D3DLOCK_NOOVERWRITE", D3DLOCK_NOOVERWRITE, D3DUSAGE_DYNAMIC, TRUE,
FALSE, FALSE},
+ {"D3DLOCK_DISCARD", D3DLOCK_DISCARD, 0, FALSE, FALSE, FALSE},
+ {"D3DLOCK_DISCARD", D3DLOCK_DISCARD, D3DUSAGE_DYNAMIC, FALSE, FALSE,
FALSE},
+ {"D3DLOCK_NOOVERWRITE | D3DLOCK_DISCARD",
+ D3DLOCK_NOOVERWRITE | D3DLOCK_DISCARD, 0, FALSE, FALSE, TRUE},
+ {"D3DLOCK_NOOVERWRITE | D3DLOCK_DISCARD",
+ D3DLOCK_NOOVERWRITE | D3DLOCK_DISCARD, D3DUSAGE_DYNAMIC, TRUE, TRUE, TRUE},
+ {"D3DLOCK_NOOVERWRITE | D3DLOCK_READONLY",
+ D3DLOCK_NOOVERWRITE | D3DLOCK_READONLY, 0, FALSE, FALSE, TRUE},
+ {"D3DLOCK_NOOVERWRITE | D3DLOCK_READONLY",
+ D3DLOCK_NOOVERWRITE | D3DLOCK_READONLY, D3DUSAGE_DYNAMIC, TRUE, TRUE, TRUE},
+ };
[...]
+ todo_wine_if (tests[i].todo)
+ {
+ ok(tests[i].unsynchronized == unsynchronized || broken(tests[i].broken),
+ "Expected buffer mapped %s. Flags = %s. Usage = %#x.\n",
+ tests[i].unsynchronized ? "unsynchronized" : "synchronized",
+ tests[i].flags_s, tests[i].usage);
+ }
I assume the broken() is for the testbot, but the way this is written,
half the cases aren't really tested on Windows. Also, what makes these
fail on the testbot?
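For what it's worth, one way to keep the Windows side meaningful would be
to only accept the deviating result on hardware that's actually known to
deviate, e.g. by checking the adapter's vendor ID. A rough sketch, assuming
a hypothetical adapter_is_nvidia() helper and that "d3d" is the test's
IDirect3D9 pointer (both are illustrative, not part of the patch):

    static BOOL adapter_is_nvidia(IDirect3D9 *d3d)
    {
        D3DADAPTER_IDENTIFIER9 identifier;
        HRESULT hr;

        hr = IDirect3D9_GetAdapterIdentifier(d3d, D3DADAPTER_DEFAULT, 0, &identifier);
        ok(SUCCEEDED(hr), "Failed to get adapter identifier, hr %#x.\n", hr);
        return identifier.VendorId == 0x10de; /* Nvidia's PCI vendor ID. */
    }

    ...

    todo_wine_if (tests[i].todo)
    {
        /* broken() only triggers on Windows, and the vendor check restricts
         * it further, so the expected result still gets tested everywhere else. */
        ok(tests[i].unsynchronized == unsynchronized
                || (tests[i].broken && broken(adapter_is_nvidia(d3d))),
                "Expected buffer mapped %s. Flags = %s. Usage = %#x.\n",
                tests[i].unsynchronized ? "unsynchronized" : "synchronized",
                tests[i].flags_s, tests[i].usage);
    }

That wouldn't cover the AMD behaviour mentioned in the comment, of course;
an adapter_is_amd() variant for the rows that deviate there would work the
same way.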
+ timestamp1 = GetTickCount();
+
+ hr = IDirect3DDevice9_BeginScene(device);
+ ok(SUCCEEDED(hr), "Failed to begin scene, hr %#x.\n", hr);
+ hr = IDirect3DDevice9_DrawPrimitive(device, D3DPT_TRIANGLESTRIP, 0, tri);
+ ok(SUCCEEDED(hr), "Failed to draw, hr %#x.\n", hr);
+ hr = IDirect3DDevice9_EndScene(device);
+ ok(SUCCEEDED(hr), "Failed to end scene, hr %#x.\n", hr);
+
+ /* Dummy read to make sure the GPU is done with the draw. */
+ color = getPixelColor(device, 160, 360);
+
+ timestamp2 = GetTickCount();
+
+ /* Scale the buffer size so that a draw call takes about 1 second. */
+ size = (size * 1000UL) / (timestamp2 - timestamp1 + 1);
Does this work reliably? GetTickCount() has very limited resolution; I
think there's a real chance that the draw will take only a fraction of
GetTickCount()'s resolution.
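If the calibration stays, QueryPerformanceCounter() would sidestep that;
its resolution is typically well under a microsecond. A rough sketch of
the same measurement (illustrative only, the variable names are mine):

    LARGE_INTEGER freq, t1, t2;
    LONGLONG elapsed_ms;

    QueryPerformanceFrequency(&freq);
    QueryPerformanceCounter(&t1);

    /* ... BeginScene() / DrawPrimitive() / EndScene() and the readback ... */

    QueryPerformanceCounter(&t2);
    /* Convert ticks to milliseconds; freq is ticks per second. */
    elapsed_ms = (t2.QuadPart - t1.QuadPart) * 1000 / freq.QuadPart;
    size = (size * 1000) / (elapsed_ms + 1);

Alternatively, drawing in a loop until some minimum wall-clock time has
elapsed would make the calibration robust regardless of the timer used.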