[PATCH vkd3d 4/4] tests: Introduce a Vulkan shader runner.

Zebediah Figura <zfigura@codeweavers.com>
Mon Apr 11 12:18:31 CDT 2022


Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
---
 Makefile.am                  |    5 +-
 tests/shader_runner.c        |    2 +
 tests/shader_runner.h        |    2 +
 tests/shader_runner_vulkan.c | 1160 ++++++++++++++++++++++++++++++++++
 tests/vulkan_procs.h         |  106 ++++
 5 files changed, 1273 insertions(+), 2 deletions(-)
 create mode 100644 tests/shader_runner_vulkan.c
 create mode 100644 tests/vulkan_procs.h

diff --git a/Makefile.am b/Makefile.am
index 737dc260d..e2a355762 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -297,7 +297,7 @@ vkd3d_compiler_SOURCES = programs/vkd3d-compiler/main.c
 vkd3d_compiler_CFLAGS = $(AM_CFLAGS) @NCURSES_CFLAGS@
 vkd3d_compiler_LDADD = libvkd3d-shader.la @NCURSES_LIBS@
 
-LDADD = libvkd3d.la libvkd3d-utils.la
+LDADD = libvkd3d.la libvkd3d-shader.la libvkd3d-utils.la
 AM_DEFAULT_SOURCE_EXT = .c
 
 TEST_EXTENSIONS = .shader_test
@@ -311,7 +311,8 @@ tests_hlsl_d3d12_LDADD = $(LDADD) @VULKAN_LIBS@
 tests_shader_runner_LDADD = $(LDADD) @VULKAN_LIBS@
 tests_shader_runner_SOURCES = \
 	tests/shader_runner.c \
-	tests/shader_runner_d3d12.c
+	tests/shader_runner_d3d12.c \
+	tests/shader_runner_vulkan.c
 tests_vkd3d_api_LDADD = libvkd3d.la @VULKAN_LIBS@
 tests_vkd3d_shader_api_LDADD = libvkd3d-shader.la
 SHADER_TEST_LOG_COMPILER = tests/shader_runner
diff --git a/tests/shader_runner.c b/tests/shader_runner.c
index bf3bc0d85..54b667fc7 100644
--- a/tests/shader_runner.c
+++ b/tests/shader_runner.c
@@ -856,6 +856,8 @@ START_TEST(shader_runner)
 #ifdef _WIN32
     run_shader_tests_d3d9(argc, argv);
     run_shader_tests_d3d11(argc, argv);
+#else
+    run_shader_tests_vulkan(argc, argv);
 #endif
     run_shader_tests_d3d12(argc, argv);
 }
diff --git a/tests/shader_runner.h b/tests/shader_runner.h
index 5d08206f2..19218ab55 100644
--- a/tests/shader_runner.h
+++ b/tests/shader_runner.h
@@ -131,5 +131,7 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const
 #ifdef _WIN32
 void run_shader_tests_d3d9(int argc, char **argv);
 void run_shader_tests_d3d11(int argc, char **argv);
+#else
+void run_shader_tests_vulkan(int argc, char **argv);
 #endif
 void run_shader_tests_d3d12(int argc, char **argv);
diff --git a/tests/shader_runner_vulkan.c b/tests/shader_runner_vulkan.c
new file mode 100644
index 000000000..9728f192b
--- /dev/null
+++ b/tests/shader_runner_vulkan.c
@@ -0,0 +1,1160 @@
+/*
+ * Shader runner which uses libvkd3d-shader to compile HLSL -> D3D bytecode -> SPIR-V
+ *
+ * Copyright 2020-2022 Zebediah Figura for CodeWeavers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+#include "config.h"
+#include "vulkan/vulkan.h"
+#include "vkd3d_shader.h"
+#include "vkd3d.h"
+#include "shader_runner.h"
+#define VKD3D_TEST_NO_DEFS
+#include "vkd3d_test.h"
+
+struct vulkan_resource
+{
+    struct resource r;
+
+    VkBuffer buffer;
+    VkImage image;
+    VkImageView view;
+    VkDeviceMemory memory;
+
+    uint32_t binding;
+};
+
+static struct vulkan_resource *vulkan_resource(struct resource *r)
+{
+    return CONTAINING_RECORD(r, struct vulkan_resource, r);
+}
+
+#define DECLARE_VK_PFN(name) PFN_##name name;
+
+struct vulkan_shader_runner
+{
+    struct shader_runner r;
+
+    VkInstance instance;
+    VkPhysicalDevice phys_device;
+    VkDevice device;
+    VkQueue queue;
+    VkCommandPool command_pool;
+    VkCommandBuffer cmd_buffer;
+    VkRenderPass render_pass;
+    VkDescriptorPool descriptor_pool;
+
+    VkImage render_target;
+    VkDeviceMemory rt_memory;
+    VkImageView rtv;
+    VkFramebuffer fb;
+
+    struct vulkan_sampler
+    {
+        VkSampler vk_sampler;
+        uint32_t binding;
+    } samplers[MAX_SAMPLERS];
+
+    DECLARE_VK_PFN(vkCreateInstance);
+#define VK_INSTANCE_PFN   DECLARE_VK_PFN
+#define VK_DEVICE_PFN     DECLARE_VK_PFN
+#include "vulkan_procs.h"
+};
+
+static struct vulkan_shader_runner *vulkan_shader_runner(struct shader_runner *r)
+{
+    return CONTAINING_RECORD(r, struct vulkan_shader_runner, r);
+}
+
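+/* All Vulkan entry points are loaded dynamically into the runner; VK_CALL() resolves them there. */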
+#define VK_CALL(f) (runner->f)
+
+static void begin_command_buffer(struct vulkan_shader_runner *runner)
+{
+    VkCommandBufferBeginInfo buffer_begin_desc = {.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
+
+    VK_CALL(vkBeginCommandBuffer(runner->cmd_buffer, &buffer_begin_desc));
+}
+
+static void end_command_buffer(struct vulkan_shader_runner *runner)
+{
+    VkSubmitInfo submit_desc = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO};
+
+    VK_CALL(vkEndCommandBuffer(runner->cmd_buffer));
+
+    submit_desc.commandBufferCount = 1;
+    submit_desc.pCommandBuffers = &runner->cmd_buffer;
+    VK_CALL(vkQueueSubmit(runner->queue, 1, &submit_desc, VK_NULL_HANDLE));
+    VK_CALL(vkQueueWaitIdle(runner->queue));
+}
+
+static void transition_image_layout(struct vulkan_shader_runner *runner,
+        VkImage image, VkImageLayout src_layout, VkImageLayout dst_layout)
+{
+    VkImageMemoryBarrier barrier = {.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
+
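+    /* Use a coarse barrier for every layout transition: all commands, full read/write access. */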
+    barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+    barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+    barrier.oldLayout = src_layout;
+    barrier.newLayout = dst_layout;
+    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.image = image;
+    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    barrier.subresourceRange.baseMipLevel = 0;
+    barrier.subresourceRange.levelCount = 1;
+    barrier.subresourceRange.baseArrayLayer = 0;
+    barrier.subresourceRange.layerCount = 1;
+
+    VK_CALL(vkCmdPipelineBarrier(runner->cmd_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, &barrier));
+}
+
+static unsigned int select_vulkan_memory_type(const struct vulkan_shader_runner *runner,
+        uint32_t memory_type_mask, VkMemoryPropertyFlags required_flags)
+{
+    VkPhysicalDeviceMemoryProperties memory_info;
+    unsigned int i;
+
+    VK_CALL(vkGetPhysicalDeviceMemoryProperties(runner->phys_device, &memory_info));
+
+    for (i = 0; i < memory_info.memoryTypeCount; ++i)
+    {
+        if (!(memory_type_mask & (1u << i)))
+            continue;
+        if ((memory_info.memoryTypes[i].propertyFlags & required_flags) == required_flags)
+            return i;
+    }
+
+    fatal_error("No valid memory types found matching mask %#x, property flags %#x.\n",
+            memory_type_mask, required_flags);
+}
+
+static VkDeviceMemory allocate_memory(const struct vulkan_shader_runner *runner,
+        const VkMemoryRequirements *memory_reqs, VkMemoryPropertyFlags flags)
+{
+    VkMemoryAllocateInfo alloc_info = {.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
+    VkDeviceMemory vk_memory;
+    VkResult vr;
+
+    alloc_info.allocationSize = memory_reqs->size;
+    alloc_info.memoryTypeIndex = select_vulkan_memory_type(runner,
+            memory_reqs->memoryTypeBits, flags);
+    vr = VK_CALL(vkAllocateMemory(runner->device, &alloc_info, NULL, &vk_memory));
+    ok(vr == VK_SUCCESS, "Got unexpected VkResult %d.\n", vr);
+
+    return vk_memory;
+}
+
+static VkBuffer create_buffer(const struct vulkan_shader_runner *runner, VkDeviceSize size,
+        VkBufferUsageFlags usage, VkMemoryPropertyFlags memory_flags, VkDeviceMemory *memory)
+{
+    VkBufferCreateInfo buffer_info = {.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
+    VkMemoryRequirements memory_reqs;
+    VkBuffer buffer;
+
+    buffer_info.size = size;
+    buffer_info.usage = usage;
+    buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+    VK_CALL(vkCreateBuffer(runner->device, &buffer_info, NULL, &buffer));
+    VK_CALL(vkGetBufferMemoryRequirements(runner->device, buffer, &memory_reqs));
+    *memory = allocate_memory(runner, &memory_reqs, memory_flags);
+    VK_CALL(vkBindBufferMemory(runner->device, buffer, *memory, 0));
+
+    return buffer;
+}
+
+static VkImage create_2d_image(const struct vulkan_shader_runner *runner, uint32_t width, uint32_t height,
+        VkImageUsageFlags usage, VkFormat format, VkDeviceMemory *memory)
+{
+    VkImageCreateInfo image_info = {.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO};
+    VkMemoryRequirements memory_reqs;
+    VkImage image;
+
+    image_info.imageType = VK_IMAGE_TYPE_2D;
+    image_info.format = format;
+    image_info.extent.width = width;
+    image_info.extent.height = height;
+    image_info.extent.depth = 1;
+    image_info.mipLevels = 1;
+    image_info.arrayLayers = 1;
+    image_info.samples = VK_SAMPLE_COUNT_1_BIT;
+    image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+    image_info.usage = usage;
+    image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+    VK_CALL(vkCreateImage(runner->device, &image_info, NULL, &image));
+
+    VK_CALL(vkGetImageMemoryRequirements(runner->device, image, &memory_reqs));
+    *memory = allocate_memory(runner, &memory_reqs, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+    VK_CALL(vkBindImageMemory(runner->device, image, *memory, 0));
+
+    return image;
+}
+
+static VkImageView create_2d_image_view(const struct vulkan_shader_runner *runner, VkImage image, VkFormat format)
+{
+    VkImageViewCreateInfo view_info = {.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO};
+    VkImageView view;
+
+    view_info.image = image;
+    view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+    view_info.format = format;
+    view_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+    view_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+    view_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+    view_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+    view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    view_info.subresourceRange.baseMipLevel = 0;
+    view_info.subresourceRange.levelCount = 1;
+    view_info.subresourceRange.baseArrayLayer = 0;
+    view_info.subresourceRange.layerCount = 1;
+
+    VK_CALL(vkCreateImageView(runner->device, &view_info, NULL, &view));
+    return view;
+}
+
+static struct resource *vulkan_runner_create_resource(struct shader_runner *r, const struct resource_params *params)
+{
+    struct vulkan_shader_runner *runner = vulkan_shader_runner(r);
+
+    struct vulkan_resource *resource;
+    VkDevice device = runner->device;
+    VkBufferImageCopy region = {0};
+    VkDeviceMemory staging_memory;
+    VkBuffer staging_buffer;
+    VkFormat format;
+    void *data;
+
+    resource = calloc(1, sizeof(*resource));
+
+    resource->r.slot = params->slot;
+    resource->r.type = params->type;
+
+    switch (params->type)
+    {
+        case RESOURCE_TYPE_TEXTURE:
+            format = vkd3d_get_vk_format(params->format);
+
+            resource->image = create_2d_image(runner, params->width, params->height,
+                    VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, format, &resource->memory);
+            resource->view = create_2d_image_view(runner, resource->image, format);
+
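+            /* Upload the initial texture data through a host-visible staging buffer. */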
+            staging_buffer = create_buffer(runner, params->data_size,
+                    VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &staging_memory);
+            VK_CALL(vkMapMemory(device, staging_memory, 0, VK_WHOLE_SIZE, 0, &data));
+            memcpy(data, params->data, params->data_size);
+            VK_CALL(vkUnmapMemory(device, staging_memory));
+
+            begin_command_buffer(runner);
+
+            transition_image_layout(runner, resource->image,
+                    VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+            region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+            region.imageSubresource.layerCount = 1;
+            region.imageExtent.width = params->width;
+            region.imageExtent.height = params->height;
+            region.imageExtent.depth = 1;
+            VK_CALL(vkCmdCopyBufferToImage(runner->cmd_buffer, staging_buffer, resource->image,
+                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region));
+
+            transition_image_layout(runner, resource->image,
+                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+            end_command_buffer(runner);
+
+            VK_CALL(vkFreeMemory(device, staging_memory, NULL));
+            VK_CALL(vkDestroyBuffer(device, staging_buffer, NULL));
+            break;
+
+        case RESOURCE_TYPE_VERTEX_BUFFER:
+            resource->buffer = create_buffer(runner, params->data_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
+                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &resource->memory);
+
+            VK_CALL(vkMapMemory(device, resource->memory, 0, VK_WHOLE_SIZE, 0, &data));
+            memcpy(data, params->data, params->data_size);
+            VK_CALL(vkUnmapMemory(device, resource->memory));
+            break;
+    }
+
+    return &resource->r;
+}
+
+static void vulkan_runner_destroy_resource(struct shader_runner *r, struct resource *res)
+{
+    struct vulkan_shader_runner *runner = vulkan_shader_runner(r);
+    struct vulkan_resource *resource = vulkan_resource(res);
+    VkDevice device = runner->device;
+
+    if (resource->memory)
+        VK_CALL(vkFreeMemory(device, resource->memory, NULL));
+    if (resource->image)
+        VK_CALL(vkDestroyImage(device, resource->image, NULL));
+    if (resource->view)
+        VK_CALL(vkDestroyImageView(device, resource->view, NULL));
+    if (resource->buffer)
+        VK_CALL(vkDestroyBuffer(device, resource->buffer, NULL));
+
+    free(resource);
+}
+
+static bool compile_shader(const struct vulkan_shader_runner *runner, const char *source, const char *type,
+        struct vkd3d_shader_code *dxbc, struct vkd3d_shader_code *spirv)
+{
+    struct vkd3d_shader_spirv_target_info spirv_info = {.type = VKD3D_SHADER_STRUCTURE_TYPE_SPIRV_TARGET_INFO};
+    struct vkd3d_shader_interface_info interface_info = {.type = VKD3D_SHADER_STRUCTURE_TYPE_INTERFACE_INFO};
+    struct vkd3d_shader_hlsl_source_info hlsl_info = {.type = VKD3D_SHADER_STRUCTURE_TYPE_HLSL_SOURCE_INFO};
+    struct vkd3d_shader_compile_info info = {.type = VKD3D_SHADER_STRUCTURE_TYPE_COMPILE_INFO};
+    struct vkd3d_shader_resource_binding bindings[MAX_RESOURCES + MAX_SAMPLERS];
+    struct vkd3d_shader_push_constant_buffer push_constants;
+    struct vkd3d_shader_resource_binding *binding;
+    char profile[7];
+    unsigned int i;
+    char *messages;
+    int ret;
+
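+    /* Map the minimum shader model to an HLSL profile; model 2.0 tests are compiled with the 4_0 profile. */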
+    static const char *const shader_models[] =
+    {
+        [SHADER_MODEL_2_0] = "4_0",
+        [SHADER_MODEL_4_0] = "4_0",
+        [SHADER_MODEL_4_1] = "4_1",
+        [SHADER_MODEL_5_0] = "5_0",
+        [SHADER_MODEL_5_1] = "5_1",
+    };
+
+    info.next = &hlsl_info;
+    info.source.code = source;
+    info.source.size = strlen(source);
+    info.source_type = VKD3D_SHADER_SOURCE_HLSL;
+    info.target_type = VKD3D_SHADER_TARGET_DXBC_TPF;
+    info.log_level = VKD3D_SHADER_LOG_WARNING;
+
+    hlsl_info.entry_point = "main";
+    sprintf(profile, "%s_%s", type, shader_models[runner->r.minimum_shader_model]);
+    hlsl_info.profile = profile;
+
+    ret = vkd3d_shader_compile(&info, dxbc, &messages);
+    ok(!ret, "Failed to compile shader, error %d.\n", ret);
+    if (messages && vkd3d_test_state.debug_level)
+        trace("%s\n", messages);
+    vkd3d_shader_free_messages(messages);
+    if (ret)
+        return false;
+
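+    /* Second pass: translate the D3D bytecode produced above into SPIR-V. */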
+    info.next = &spirv_info;
+    info.source = *dxbc;
+    info.source_type = VKD3D_SHADER_SOURCE_DXBC_TPF;
+    info.target_type = VKD3D_SHADER_TARGET_SPIRV_BINARY;
+
+    spirv_info.next = &interface_info;
+    spirv_info.environment = VKD3D_SHADER_SPIRV_ENVIRONMENT_VULKAN_1_0;
+
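+    /* Uniforms are passed as a single push-constant buffer, mapped to constant buffer b0 in register space 0. */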
+    push_constants.register_space = 0;
+    push_constants.register_index = 0;
+    push_constants.shader_visibility = VKD3D_SHADER_VISIBILITY_ALL;
+    push_constants.offset = 0;
+    push_constants.size = runner->r.uniform_count * sizeof(*runner->r.uniforms);
+
+    for (i = 0; i < runner->r.resource_count; ++i)
+    {
+        const struct vulkan_resource *resource = vulkan_resource(runner->r.resources[i]);
+
+        switch (resource->r.type)
+        {
+            case RESOURCE_TYPE_VERTEX_BUFFER:
+                break;
+
+            case RESOURCE_TYPE_TEXTURE:
+                binding = &bindings[interface_info.binding_count++];
+                binding->type = VKD3D_SHADER_DESCRIPTOR_TYPE_SRV;
+                binding->register_space = 0;
+                binding->register_index = resource->r.slot;
+                binding->shader_visibility = VKD3D_SHADER_VISIBILITY_ALL;
+                binding->flags = VKD3D_SHADER_BINDING_FLAG_IMAGE;
+                binding->binding.set = 0;
+                binding->binding.binding = resource->binding;
+                binding->binding.count = 1;
+                break;
+        }
+    }
+
+    for (i = 0; i < runner->r.sampler_count; ++i)
+    {
+        binding = &bindings[interface_info.binding_count++];
+        binding->type = VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER;
+        binding->register_space = 0;
+        binding->register_index = runner->r.samplers[i].slot;
+        binding->shader_visibility = VKD3D_SHADER_VISIBILITY_ALL;
+        binding->flags = VKD3D_SHADER_BINDING_FLAG_IMAGE;
+        binding->binding.set = 0;
+        binding->binding.binding = runner->samplers[i].binding;
+        binding->binding.count = 1;
+    }
+
+    interface_info.bindings = bindings;
+
+    interface_info.push_constant_buffer_count = 1;
+    interface_info.push_constant_buffers = &push_constants;
+
+    ret = vkd3d_shader_compile(&info, spirv, &messages);
+    ok(!ret, "Failed to compile shader, error %d.\n", ret);
+    if (messages && vkd3d_test_state.debug_level)
+        trace("%s\n", messages);
+    vkd3d_shader_free_messages(messages);
+    if (ret)
+        return false;
+
+    return true;
+}
+
+static bool create_shader_stage(const struct vulkan_shader_runner *runner,
+        VkPipelineShaderStageCreateInfo *stage_info, const char *type, enum VkShaderStageFlagBits stage,
+        const char *source, struct vkd3d_shader_code *dxbc_ptr)
+{
+    VkShaderModuleCreateInfo module_info = {.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO};
+    struct vkd3d_shader_code spirv, dxbc;
+
+    if (!dxbc_ptr)
+        dxbc_ptr = &dxbc;
+
+    if (!compile_shader(runner, source, type, dxbc_ptr, &spirv))
+        return false;
+
+    if (dxbc_ptr == &dxbc)
+        vkd3d_shader_free_shader_code(&dxbc);
+
+    memset(stage_info, 0, sizeof(*stage_info));
+    stage_info->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    stage_info->stage = stage;
+    stage_info->pName = "main";
+
+    module_info.codeSize = spirv.size;
+    module_info.pCode = spirv.code;
+
+    VK_CALL(vkCreateShaderModule(runner->device, &module_info, NULL, &stage_info->module));
+    vkd3d_shader_free_shader_code(&spirv);
+    return true;
+}
+
+static VkPrimitiveTopology vulkan_primitive_topology_from_d3d(D3D_PRIMITIVE_TOPOLOGY topology)
+{
+    switch (topology)
+    {
+        default:
+            fatal_error("Unhandled primitive topology %#x.\n", topology);
+            /* fall through */
+        case D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST:
+            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+        case D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP:
+            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+    }
+}
+
+static VkPipelineLayout create_pipeline_layout(const struct vulkan_shader_runner *runner,
+        VkDescriptorSetLayout set_layout)
+{
+    VkPipelineLayoutCreateInfo layout_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
+    VkPushConstantRange push_constant_range;
+    VkPipelineLayout pipeline_layout;
+
+    layout_desc.setLayoutCount = 1;
+    layout_desc.pSetLayouts = &set_layout;
+
+    if (runner->r.uniform_count)
+    {
+        layout_desc.pushConstantRangeCount = 1;
+        layout_desc.pPushConstantRanges = &push_constant_range;
+
+        push_constant_range.stageFlags = VK_SHADER_STAGE_ALL;
+        push_constant_range.offset = 0;
+        push_constant_range.size = runner->r.uniform_count * sizeof(*runner->r.uniforms);
+    }
+
+    VK_CALL(vkCreatePipelineLayout(runner->device, &layout_desc, NULL, &pipeline_layout));
+
+    return pipeline_layout;
+}
+
+static VkPipeline create_pipeline(const struct vulkan_shader_runner *runner,
+        VkPipelineLayout pipeline_layout, D3D_PRIMITIVE_TOPOLOGY primitive_topology)
+{
+    VkPipelineInputAssemblyStateCreateInfo ia_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO};
+    VkPipelineRasterizationStateCreateInfo rs_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO};
+    VkPipelineVertexInputStateCreateInfo input_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO};
+    VkPipelineColorBlendStateCreateInfo blend_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO};
+    VkPipelineMultisampleStateCreateInfo ms_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO};
+    static const VkViewport viewport = {.width = RENDER_TARGET_WIDTH, .height = RENDER_TARGET_HEIGHT, .maxDepth = 1};
+    VkPipelineViewportStateCreateInfo vp_desc = {.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO};
+    static const VkRect2D rt_rect = {.extent.width = RENDER_TARGET_WIDTH, .extent.height = RENDER_TARGET_HEIGHT};
+    VkGraphicsPipelineCreateInfo pipeline_desc = {.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO};
+    VkPipelineColorBlendAttachmentState attachment_desc = {0};
+    VkVertexInputAttributeDescription input_attributes[32];
+    VkVertexInputBindingDescription input_bindings[32];
+    struct vkd3d_shader_signature vs_input_signature;
+    VkPipelineShaderStageCreateInfo stage_desc[2];
+    struct vkd3d_shader_code vs_dxbc;
+    VkDevice device = runner->device;
+    VkPipeline pipeline;
+    unsigned int i, j;
+    int ret;
+
+    if (!create_shader_stage(runner, &stage_desc[0], "vs", VK_SHADER_STAGE_VERTEX_BIT, runner->r.vs_source, &vs_dxbc))
+        return VK_NULL_HANDLE;
+    if (!create_shader_stage(runner, &stage_desc[1], "ps", VK_SHADER_STAGE_FRAGMENT_BIT, runner->r.ps_source, NULL))
+    {
+        VK_CALL(vkDestroyShaderModule(device, stage_desc[0].module, NULL));
+        return VK_NULL_HANDLE;
+    }
+
+    ret = vkd3d_shader_parse_input_signature(&vs_dxbc, &vs_input_signature, NULL);
+    ok(!ret, "Failed to parse input signature, error %d.\n", ret);
+
+    if (runner->r.input_element_count > ARRAY_SIZE(input_attributes))
+        fatal_error("Input element count %zu is too high.\n", runner->r.input_element_count);
+
+    for (i = 0; i < runner->r.input_element_count; ++i)
+    {
+        VkVertexInputAttributeDescription *attribute = &input_attributes[i];
+        const struct input_element *element = &runner->r.input_elements[i];
+        const struct vkd3d_shader_signature_element *signature_element;
+
+        signature_element = vkd3d_shader_find_signature_element(&vs_input_signature, element->name, element->index, 0);
+
+        attribute->location = signature_element->register_index;
+        attribute->binding = element->slot;
+        attribute->format = vkd3d_get_vk_format(element->format);
+        /* The offset will be filled below. */
+    }
+
+    input_desc.vertexAttributeDescriptionCount = runner->r.input_element_count;
+    input_desc.pVertexAttributeDescriptions = input_attributes;
+    input_desc.pVertexBindingDescriptions = input_bindings;
+
+    for (i = 0; i < runner->r.resource_count; ++i)
+    {
+        const struct vulkan_resource *resource = vulkan_resource(runner->r.resources[i]);
+
+        switch (resource->r.type)
+        {
+            case RESOURCE_TYPE_TEXTURE:
+                break;
+
+            case RESOURCE_TYPE_VERTEX_BUFFER:
+            {
+                VkVertexInputBindingDescription *binding = &input_bindings[input_desc.vertexBindingDescriptionCount++];
+
+                binding->binding = resource->r.slot;
+                binding->stride = 0;
+                binding->inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
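+                /* Attributes bound to this slot are packed tightly; accumulate the binding stride as offsets are assigned. */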
+                for (j = 0; j < runner->r.input_element_count; ++j)
+                {
+                    if (runner->r.input_elements[j].slot == resource->r.slot)
+                    {
+                        input_attributes[j].offset = binding->stride;
+                        binding->stride += runner->r.input_elements[j].texel_size;
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+    ia_desc.topology = vulkan_primitive_topology_from_d3d(primitive_topology);
+
+    vp_desc.viewportCount = 1;
+    vp_desc.pViewports = &viewport;
+    vp_desc.scissorCount = 1;
+    vp_desc.pScissors = &rt_rect;
+
+    rs_desc.frontFace = VK_FRONT_FACE_CLOCKWISE;
+    rs_desc.lineWidth = 1.0f;
+
+    ms_desc.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+
+    blend_desc.attachmentCount = 1;
+    blend_desc.pAttachments = &attachment_desc;
+
+    attachment_desc.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT
+            | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+    pipeline_desc.stageCount = ARRAY_SIZE(stage_desc);
+    pipeline_desc.pStages = stage_desc;
+    pipeline_desc.pVertexInputState = &input_desc;
+    pipeline_desc.pInputAssemblyState = &ia_desc;
+    pipeline_desc.pViewportState = &vp_desc;
+    pipeline_desc.pRasterizationState = &rs_desc;
+    pipeline_desc.pMultisampleState = &ms_desc;
+    pipeline_desc.pColorBlendState = &blend_desc;
+    pipeline_desc.layout = pipeline_layout;
+    pipeline_desc.renderPass = runner->render_pass;
+    pipeline_desc.subpass = 0;
+
+    VK_CALL(vkCreateGraphicsPipelines(runner->device, VK_NULL_HANDLE, 1, &pipeline_desc, NULL, &pipeline));
+
+    VK_CALL(vkDestroyShaderModule(device, stage_desc[0].module, NULL));
+    VK_CALL(vkDestroyShaderModule(device, stage_desc[1].module, NULL));
+    vkd3d_shader_free_shader_signature(&vs_input_signature);
+    vkd3d_shader_free_shader_code(&vs_dxbc);
+
+    return pipeline;
+}
+
+static VkSamplerAddressMode vk_address_mode_from_d3d12(D3D12_TEXTURE_ADDRESS_MODE mode)
+{
+    switch (mode)
+    {
+        case D3D12_TEXTURE_ADDRESS_MODE_WRAP:
+            return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+        case D3D12_TEXTURE_ADDRESS_MODE_MIRROR:
+            return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+        case D3D12_TEXTURE_ADDRESS_MODE_CLAMP:
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+        case D3D12_TEXTURE_ADDRESS_MODE_BORDER:
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+        default:
+            fatal_error("Unhandled sampler address mode %#x.\n", mode);
+            return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+    }
+}
+
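+/* Descriptor bindings are assigned sequentially here and recorded on each resource and
+ * sampler, so that compile_shader() can later map D3D registers to them. */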
+static VkDescriptorSetLayout create_descriptor_set_layout(struct vulkan_shader_runner *runner)
+{
+    VkDescriptorSetLayoutCreateInfo set_desc = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO};
+    VkDescriptorSetLayoutBinding bindings[MAX_RESOURCES + MAX_SAMPLERS];
+    VkDescriptorSetLayoutBinding *binding;
+    VkDescriptorSetLayout set_layout;
+    uint32_t binding_index = 0;
+    size_t i;
+
+    if (runner->r.resource_count > ARRAY_SIZE(bindings))
+        fatal_error("Resource count %zu is too high.\n", runner->r.resource_count);
+
+    set_desc.pBindings = bindings;
+
+    for (i = 0; i < runner->r.resource_count; ++i)
+    {
+        struct vulkan_resource *resource = vulkan_resource(runner->r.resources[i]);
+
+        switch (resource->r.type)
+        {
+            case RESOURCE_TYPE_VERTEX_BUFFER:
+                break;
+
+            case RESOURCE_TYPE_TEXTURE:
+                binding = &bindings[set_desc.bindingCount++];
+
+                resource->binding = binding_index++;
+
+                binding->binding = resource->binding;
+                binding->descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+                binding->descriptorCount = 1;
+                binding->stageFlags = VK_SHADER_STAGE_ALL;
+                binding->pImmutableSamplers = NULL;
+                break;
+        }
+    }
+
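+    /* Samplers are created here and bound as immutable samplers in the set layout. */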
+    for (i = 0; i < runner->r.sampler_count; ++i)
+    {
+        VkSamplerCreateInfo sampler_desc = {.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
+        struct vulkan_sampler *vulkan_sampler = &runner->samplers[i];
+        const struct sampler *sampler = &runner->r.samplers[i];
+
+        sampler_desc.magFilter = (sampler->filter & 0x4) ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
+        sampler_desc.minFilter = (sampler->filter & 0x1) ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
+        sampler_desc.mipmapMode = (sampler->filter & 0x10) ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST;
+        sampler_desc.addressModeU = vk_address_mode_from_d3d12(sampler->u_address);
+        sampler_desc.addressModeV = vk_address_mode_from_d3d12(sampler->v_address);
+        sampler_desc.addressModeW = vk_address_mode_from_d3d12(sampler->w_address);
+
+        VK_CALL(vkCreateSampler(runner->device, &sampler_desc, NULL, &vulkan_sampler->vk_sampler));
+        vulkan_sampler->binding = binding_index++;
+
+        binding = &bindings[set_desc.bindingCount++];
+
+        binding->binding = vulkan_sampler->binding;
+        binding->descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+        binding->descriptorCount = 1;
+        binding->stageFlags = VK_SHADER_STAGE_ALL;
+        binding->pImmutableSamplers = &vulkan_sampler->vk_sampler;
+    }
+
+    VK_CALL(vkCreateDescriptorSetLayout(runner->device, &set_desc, NULL, &set_layout));
+
+    return set_layout;
+}
+
+static void bind_resources(struct vulkan_shader_runner *runner, VkPipelineBindPoint bind_point,
+        VkDescriptorSetLayout set_layout, VkPipelineLayout pipeline_layout)
+{
+    VkDescriptorSetAllocateInfo set_desc = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO};
+    VkCommandBuffer cmd_buffer = runner->cmd_buffer;
+    VkDescriptorSet descriptor_set;
+    unsigned int i;
+
+    set_desc.descriptorPool = runner->descriptor_pool;
+    set_desc.descriptorSetCount = 1;
+    set_desc.pSetLayouts = &set_layout;
+
+    VK_CALL(vkAllocateDescriptorSets(runner->device, &set_desc, &descriptor_set));
+
+    for (i = 0; i < runner->r.resource_count; ++i)
+    {
+        const struct vulkan_resource *resource = vulkan_resource(runner->r.resources[i]);
+        VkWriteDescriptorSet write = {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET};
+        static const VkDeviceSize zero_offset;
+        VkDescriptorImageInfo image_info;
+
+        switch (resource->r.type)
+        {
+            case RESOURCE_TYPE_TEXTURE:
+                image_info.imageView = resource->view;
+                image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+                write.dstSet = descriptor_set;
+                write.dstBinding = resource->binding;
+                write.dstArrayElement = 0;
+                write.descriptorCount = 1;
+                write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+                write.pImageInfo = &image_info;
+
+                VK_CALL(vkUpdateDescriptorSets(runner->device, 1, &write, 0, NULL));
+                break;
+
+            case RESOURCE_TYPE_VERTEX_BUFFER:
+                if (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS)
+                    VK_CALL(vkCmdBindVertexBuffers(cmd_buffer, resource->r.slot, 1, &resource->buffer, &zero_offset));
+                break;
+        }
+    }
+
+    VK_CALL(vkCmdBindDescriptorSets(cmd_buffer, bind_point, pipeline_layout, 0, 1, &descriptor_set, 0, NULL));
+
+    if (runner->r.uniform_count)
+        VK_CALL(vkCmdPushConstants(cmd_buffer, pipeline_layout, VK_SHADER_STAGE_ALL, 0,
+                runner->r.uniform_count * sizeof(*runner->r.uniforms), runner->r.uniforms));
+
+    /* The descriptor set will be freed by resetting the descriptor pool. */
+}
+
+static void vulkan_runner_draw(struct shader_runner *r,
+        D3D_PRIMITIVE_TOPOLOGY primitive_topology, unsigned int vertex_count)
+{
+    struct vulkan_shader_runner *runner = vulkan_shader_runner(r);
+
+    static const VkRect2D rt_rect = {.extent.width = RENDER_TARGET_WIDTH, .extent.height = RENDER_TARGET_HEIGHT};
+    VkRenderPassBeginInfo pass_begin_desc = {.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
+    VkCommandBuffer cmd_buffer = runner->cmd_buffer;
+    VkClearAttachment clear_attachment;
+    VkDescriptorSetLayout set_layout;
+    VkPipelineLayout pipeline_layout;
+    VkDevice device = runner->device;
+    VkClearRect clear_rect;
+    VkPipeline pipeline;
+    unsigned int i;
+
+    /* Create this before compiling shaders, it will assign resource bindings. */
+    set_layout = create_descriptor_set_layout(runner);
+
+    pipeline_layout = create_pipeline_layout(runner, set_layout);
+    if (!(pipeline = create_pipeline(runner, pipeline_layout, primitive_topology)))
+        goto out;
+
+    begin_command_buffer(runner);
+
+    pass_begin_desc.renderPass = runner->render_pass;
+    pass_begin_desc.framebuffer = runner->fb;
+    pass_begin_desc.renderArea = rt_rect;
+
+    VK_CALL(vkCmdBeginRenderPass(cmd_buffer, &pass_begin_desc, VK_SUBPASS_CONTENTS_INLINE));
+
+    clear_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    clear_attachment.colorAttachment = 0;
+    memset(&clear_attachment.clearValue.color, 0, sizeof(clear_attachment.clearValue.color));
+
+    clear_rect.rect = rt_rect;
+    clear_rect.baseArrayLayer = 0;
+    clear_rect.layerCount = 1;
+
+    VK_CALL(vkCmdClearAttachments(cmd_buffer, 1, &clear_attachment, 1, &clear_rect));
+
+    VK_CALL(vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline));
+
+    bind_resources(runner, VK_PIPELINE_BIND_POINT_GRAPHICS, set_layout, pipeline_layout);
+
+    VK_CALL(vkCmdDraw(cmd_buffer, vertex_count, 1, 0, 0));
+
+    VK_CALL(vkCmdEndRenderPass(cmd_buffer));
+    end_command_buffer(runner);
+
+    VK_CALL(vkDestroyPipeline(device, pipeline, NULL));
+    VK_CALL(vkResetDescriptorPool(device, runner->descriptor_pool, 0));
+
+out:
+    for (i = 0; i < runner->r.sampler_count; ++i)
+        VK_CALL(vkDestroySampler(device, runner->samplers[i].vk_sampler, NULL));
+
+    VK_CALL(vkDestroyPipelineLayout(device, pipeline_layout, NULL));
+    VK_CALL(vkDestroyDescriptorSetLayout(device, set_layout, NULL));
+}
+
+static const struct vec4 *get_readback_vec4(const uint8_t *data, unsigned int row_pitch, unsigned int x, unsigned int y)
+{
+    return (struct vec4 *)(data + y * row_pitch + x * sizeof(struct vec4));
+}
+
+static void check_readback_data_vec4(const uint8_t *data, unsigned int row_pitch,
+        const RECT *rect, const struct vec4 *expected, unsigned int max_diff)
+{
+    unsigned int x = 0, y = 0;
+    struct vec4 got = {0};
+    bool all_match = true;
+
+    for (y = rect->top; y < rect->bottom; ++y)
+    {
+        for (x = rect->left; x < rect->right; ++x)
+        {
+            got = *get_readback_vec4(data, row_pitch, x, y);
+            if (!compare_vec4(&got, expected, max_diff))
+            {
+                all_match = false;
+                break;
+            }
+        }
+        if (!all_match)
+            break;
+    }
+    ok(all_match, "Got {%.8e, %.8e, %.8e, %.8e}, expected {%.8e, %.8e, %.8e, %.8e} at (%u, %u).\n",
+            got.x, got.y, got.z, got.w, expected->x, expected->y, expected->z, expected->w, x, y);
+}
+
+static void vulkan_runner_probe_vec4(struct shader_runner *r, const RECT *rect, const struct vec4 *v, unsigned int ulps)
+{
+    struct vulkan_shader_runner *runner = vulkan_shader_runner(r);
+    VkDevice device = runner->device;
+    VkBufferImageCopy region = {0};
+    VkBuffer staging_buffer;
+    unsigned int row_pitch;
+    VkDeviceMemory memory;
+    void *data;
+
+    row_pitch = RENDER_TARGET_WIDTH * sizeof(struct vec4);
+
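+    /* Copy the render target into a host-visible staging buffer and check the contents on the CPU. */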
+    staging_buffer = create_buffer(runner, row_pitch * RENDER_TARGET_HEIGHT,
+            VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memory);
+
+    begin_command_buffer(runner);
+
+    transition_image_layout(runner, runner->render_target,
+            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+
+    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    region.imageSubresource.layerCount = 1;
+    region.imageExtent.width = RENDER_TARGET_WIDTH;
+    region.imageExtent.height = RENDER_TARGET_HEIGHT;
+    region.imageExtent.depth = 1;
+
+    VK_CALL(vkCmdCopyImageToBuffer(runner->cmd_buffer, runner->render_target,
+            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, staging_buffer, 1, &region));
+
+    transition_image_layout(runner, runner->render_target,
+            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    end_command_buffer(runner);
+
+    VK_CALL(vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &data));
+    check_readback_data_vec4(data, row_pitch, rect, v, ulps);
+    VK_CALL(vkUnmapMemory(device, memory));
+
+    VK_CALL(vkFreeMemory(device, memory, NULL));
+    VK_CALL(vkDestroyBuffer(device, staging_buffer, NULL));
+}
+
+static const struct shader_runner_ops vulkan_runner_ops =
+{
+    .create_resource = vulkan_runner_create_resource,
+    .destroy_resource = vulkan_runner_destroy_resource,
+    .draw = vulkan_runner_draw,
+    .probe_vec4 = vulkan_runner_probe_vec4,
+};
+
+static bool get_graphics_queue_index(const struct vulkan_shader_runner *runner, uint32_t *index)
+{
+    VkQueueFamilyProperties *queue_properties;
+    uint32_t count, i;
+
+    count = 0;
+    VK_CALL(vkGetPhysicalDeviceQueueFamilyProperties(runner->phys_device, &count, NULL));
+    queue_properties = malloc(count * sizeof(*queue_properties));
+    VK_CALL(vkGetPhysicalDeviceQueueFamilyProperties(runner->phys_device, &count, queue_properties));
+
+    for (i = 0; i < count; ++i)
+    {
+        if (queue_properties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
+        {
+            free(queue_properties);
+            *index = i;
+            return true;
+        }
+    }
+
+    free(queue_properties);
+    return false;
+}
+
+static const char *const required_device_extensions[] =
+{
+    VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
+};
+
+static bool has_extension(const VkExtensionProperties *extensions, uint32_t count, const char *extension_name)
+{
+    uint32_t i;
+
+    for (i = 0; i < count; ++i)
+    {
+        if (!strcmp(extensions[i].extensionName, extension_name))
+            return true;
+    }
+    return false;
+}
+
+static bool check_device_extensions(struct vulkan_shader_runner *runner)
+{
+    VkPhysicalDevice phys_device = runner->phys_device;
+    VkExtensionProperties *extensions;
+    uint32_t i, count;
+
+    VK_CALL(vkEnumerateDeviceExtensionProperties(phys_device, NULL, &count, NULL));
+    extensions = calloc(count, sizeof(*extensions));
+    VK_CALL(vkEnumerateDeviceExtensionProperties(phys_device, NULL, &count, extensions));
+
+    for (i = 0; i < ARRAY_SIZE(required_device_extensions); ++i)
+    {
+        if (!has_extension(extensions, count, required_device_extensions[i]))
+        {
+            skip("The selected Vulkan device does not support %s.\n", required_device_extensions[i]);
+            free(extensions);
+            return false;
+        }
+    }
+
+    free(extensions);
+    return true;
+}
+
+static bool init_vulkan_runner(struct vulkan_shader_runner *runner)
+{
+    VkDescriptorPoolCreateInfo descriptor_pool_desc = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO};
+    VkCommandBufferAllocateInfo cmd_buffer_desc = {.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO};
+    VkCommandPoolCreateInfo command_pool_desc = {.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
+    VkRenderPassCreateInfo render_pass_desc = {.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO};
+    VkDeviceQueueCreateInfo queue_desc = {.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO};
+    VkFramebufferCreateInfo fb_desc = {.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO};
+    VkInstanceCreateInfo instance_desc = {.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
+    VkDeviceCreateInfo device_desc = {.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
+    VkPhysicalDeviceFeatures ret_features, features;
+    VkDescriptorPoolSize descriptor_pool_sizes[2];
+    VkAttachmentDescription attachment_desc = {0};
+    static const float queue_priority = 1.0f;
+    VkSubpassDescription subpass_desc = {0};
+    VkAttachmentReference color_ref = {0};
+    VkFormatProperties format_props;
+    uint32_t count, graphics_index;
+    VkDevice device;
+    VkResult vr;
+
+    runner->vkCreateInstance = (void *)vkGetInstanceProcAddr(NULL, "vkCreateInstance");
+
+    if ((vr = VK_CALL(vkCreateInstance(&instance_desc, NULL, &runner->instance))) < 0)
+    {
+        skip("Failed to create a Vulkan instance, vr %d.\n", vr);
+        return false;
+    }
+
+#define VK_INSTANCE_PFN(name) runner->name = (void *)vkGetInstanceProcAddr(runner->instance, #name);
+#include "vulkan_procs.h"
+
+    count = 1;
+    if ((vr = VK_CALL(vkEnumeratePhysicalDevices(runner->instance, &count, &runner->phys_device))) < 0)
+    {
+        skip("Failed to enumerate physical devices, vr %d.\n", vr);
+        goto out_destroy_instance;
+    }
+
+    if (!count)
+    {
+        skip("No Vulkan devices are available.\n");
+        goto out_destroy_instance;
+    }
+
+    if (!get_graphics_queue_index(runner, &graphics_index))
+    {
+        skip("The selected Vulkan device does not support graphics operations.\n");
+        goto out_destroy_instance;
+    }
+
+    device_desc.pQueueCreateInfos = &queue_desc;
+    device_desc.queueCreateInfoCount = 1;
+    device_desc.enabledExtensionCount = ARRAY_SIZE(required_device_extensions);
+    device_desc.ppEnabledExtensionNames = required_device_extensions;
+
+    queue_desc.queueFamilyIndex = graphics_index;
+    queue_desc.queueCount = 1;
+    queue_desc.pQueuePriorities = &queue_priority;
+
+    if (!check_device_extensions(runner))
+        goto out_destroy_instance;
+
+    VK_CALL(vkGetPhysicalDeviceFormatProperties(runner->phys_device, VK_FORMAT_R32G32B32A32_SFLOAT, &format_props));
+    if (!(format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
+    {
+        skip("The selected Vulkan device does not support R32G32B32A32_SFLOAT render targets.\n");
+        goto out_destroy_instance;
+    }
+
+    VK_CALL(vkGetPhysicalDeviceFeatures(runner->phys_device, &ret_features));
+
+    device_desc.pEnabledFeatures = &features;
+    memset(&features, 0, sizeof(features));
+
+    /* FIXME: Probably make these optional. */
+
+#define ENABLE_FEATURE(x) \
+    if (!ret_features.x) \
+    { \
+        skip("The selected Vulkan device does not support " #x ".\n"); \
+        goto out_destroy_instance; \
+    } \
+    features.x = VK_TRUE
+
+    ENABLE_FEATURE(shaderImageGatherExtended);
+
+    if ((vr = VK_CALL(vkCreateDevice(runner->phys_device, &device_desc, NULL, &device))))
+    {
+        skip("Failed to create device, vr %d.\n", vr);
+        goto out_destroy_instance;
+    }
+    runner->device = device;
+
+#define VK_DEVICE_PFN(name) runner->name = (void *)VK_CALL(vkGetDeviceProcAddr(device, #name));
+#include "vulkan_procs.h"
+
+    VK_CALL(vkGetDeviceQueue(device, graphics_index, 0, &runner->queue));
+
+    command_pool_desc.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+    command_pool_desc.queueFamilyIndex = graphics_index;
+
+    VK_CALL(vkCreateCommandPool(device, &command_pool_desc, NULL, &runner->command_pool));
+
+    cmd_buffer_desc.commandPool = runner->command_pool;
+    cmd_buffer_desc.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+    cmd_buffer_desc.commandBufferCount = 1;
+
+    VK_CALL(vkAllocateCommandBuffers(device, &cmd_buffer_desc, &runner->cmd_buffer));
+
+    attachment_desc.format = VK_FORMAT_R32G32B32A32_SFLOAT;
+    attachment_desc.samples = VK_SAMPLE_COUNT_1_BIT;
+    attachment_desc.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+    attachment_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+    attachment_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+    attachment_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+    attachment_desc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+    attachment_desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+    color_ref.attachment = 0;
+    color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+    subpass_desc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    subpass_desc.colorAttachmentCount = 1;
+    subpass_desc.pColorAttachments = &color_ref;
+
+    render_pass_desc.attachmentCount = 1;
+    render_pass_desc.pAttachments = &attachment_desc;
+    render_pass_desc.subpassCount = 1;
+    render_pass_desc.pSubpasses = &subpass_desc;
+
+    VK_CALL(vkCreateRenderPass(device, &render_pass_desc, NULL, &runner->render_pass));
+
+    runner->render_target = create_2d_image(runner, RENDER_TARGET_WIDTH, RENDER_TARGET_HEIGHT,
+            VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
+            VK_FORMAT_R32G32B32A32_SFLOAT, &runner->rt_memory);
+    runner->rtv = create_2d_image_view(runner, runner->render_target, VK_FORMAT_R32G32B32A32_SFLOAT);
+
+    fb_desc.renderPass = runner->render_pass;
+    fb_desc.attachmentCount = 1;
+    fb_desc.pAttachments = &runner->rtv;
+    fb_desc.width = RENDER_TARGET_WIDTH;
+    fb_desc.height = RENDER_TARGET_HEIGHT;
+    fb_desc.layers = 1;
+
+    VK_CALL(vkCreateFramebuffer(device, &fb_desc, NULL, &runner->fb));
+
+    descriptor_pool_sizes[0].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+    descriptor_pool_sizes[0].descriptorCount = MAX_RESOURCES;
+    descriptor_pool_sizes[1].type = VK_DESCRIPTOR_TYPE_SAMPLER;
+    descriptor_pool_sizes[1].descriptorCount = MAX_SAMPLERS;
+
+    descriptor_pool_desc.maxSets = 1;
+    descriptor_pool_desc.poolSizeCount = ARRAY_SIZE(descriptor_pool_sizes);
+    descriptor_pool_desc.pPoolSizes = descriptor_pool_sizes;
+
+    VK_CALL(vkCreateDescriptorPool(device, &descriptor_pool_desc, NULL, &runner->descriptor_pool));
+
+    begin_command_buffer(runner);
+
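+    /* The render pass loads the existing contents and expects COLOR_ATTACHMENT_OPTIMAL, so transition the fresh image now. */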
+    transition_image_layout(runner, runner->render_target,
+            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    end_command_buffer(runner);
+
+    return true;
+
+out_destroy_instance:
+    VK_CALL(vkDestroyInstance(runner->instance, NULL));
+    return false;
+}
+
+static void cleanup_vulkan_runner(struct vulkan_shader_runner *runner)
+{
+    VkDevice device = runner->device;
+
+    VK_CALL(vkFreeMemory(device, runner->rt_memory, NULL));
+    VK_CALL(vkDestroyFramebuffer(device, runner->fb, NULL));
+    VK_CALL(vkDestroyImageView(device, runner->rtv, NULL));
+    VK_CALL(vkDestroyImage(device, runner->render_target, NULL));
+
+    VK_CALL(vkDestroyDescriptorPool(device, runner->descriptor_pool, NULL));
+    VK_CALL(vkDestroyRenderPass(device, runner->render_pass, NULL));
+    VK_CALL(vkFreeCommandBuffers(device, runner->command_pool, 1, &runner->cmd_buffer));
+    VK_CALL(vkDestroyCommandPool(device, runner->command_pool, NULL));
+    VK_CALL(vkDestroyDevice(device, NULL));
+    VK_CALL(vkDestroyInstance(runner->instance, NULL));
+}
+
+void run_shader_tests_vulkan(int argc, char **argv)
+{
+    struct vulkan_shader_runner runner = {0};
+
+    if (!init_vulkan_runner(&runner))
+        return;
+
+    run_shader_tests(&runner.r, argc, argv, &vulkan_runner_ops);
+
+    cleanup_vulkan_runner(&runner);
+}
diff --git a/tests/vulkan_procs.h b/tests/vulkan_procs.h
new file mode 100644
index 000000000..8e205c1be
--- /dev/null
+++ b/tests/vulkan_procs.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2016 Józef Kucia for CodeWeavers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+#ifndef VK_INSTANCE_PFN
+# define VK_INSTANCE_PFN(x)
+#endif
+
+#ifndef VK_INSTANCE_EXT_PFN
+# define VK_INSTANCE_EXT_PFN(x)
+#endif
+
+#ifndef VK_DEVICE_PFN
+# define VK_DEVICE_PFN(x)
+#endif
+
+#ifndef VK_DEVICE_EXT_PFN
+# define VK_DEVICE_EXT_PFN(x)
+#endif
+
+/* Instance functions (obtained by vkGetInstanceProcAddr). */
+VK_INSTANCE_PFN(vkDestroyInstance) /* Load vkDestroyInstance() first. */
+VK_INSTANCE_PFN(vkCreateDevice)
+VK_INSTANCE_PFN(vkEnumerateDeviceExtensionProperties)
+VK_INSTANCE_PFN(vkEnumeratePhysicalDevices)
+VK_INSTANCE_PFN(vkGetDeviceProcAddr)
+VK_INSTANCE_PFN(vkGetPhysicalDeviceFeatures)
+VK_INSTANCE_PFN(vkGetPhysicalDeviceFormatProperties)
+VK_INSTANCE_PFN(vkGetPhysicalDeviceMemoryProperties)
+VK_INSTANCE_PFN(vkGetPhysicalDeviceQueueFamilyProperties)
+
+/* Device functions (obtained by vkGetDeviceProcAddr). */
+VK_DEVICE_PFN(vkDestroyDevice) /* Load vkDestroyDevice() first. */
+VK_DEVICE_PFN(vkAllocateCommandBuffers)
+VK_DEVICE_PFN(vkAllocateDescriptorSets)
+VK_DEVICE_PFN(vkAllocateMemory)
+VK_DEVICE_PFN(vkBeginCommandBuffer)
+VK_DEVICE_PFN(vkBindBufferMemory)
+VK_DEVICE_PFN(vkBindImageMemory)
+VK_DEVICE_PFN(vkCmdBeginRenderPass)
+VK_DEVICE_PFN(vkCmdBindDescriptorSets)
+VK_DEVICE_PFN(vkCmdBindPipeline)
+VK_DEVICE_PFN(vkCmdBindVertexBuffers)
+VK_DEVICE_PFN(vkCmdClearAttachments)
+VK_DEVICE_PFN(vkCmdCopyBufferToImage)
+VK_DEVICE_PFN(vkCmdCopyImageToBuffer)
+VK_DEVICE_PFN(vkCmdDraw)
+VK_DEVICE_PFN(vkCmdEndRenderPass)
+VK_DEVICE_PFN(vkCmdFillBuffer)
+VK_DEVICE_PFN(vkCmdPipelineBarrier)
+VK_DEVICE_PFN(vkCmdPushConstants)
+VK_DEVICE_PFN(vkCreateBuffer)
+VK_DEVICE_PFN(vkCreateCommandPool)
+VK_DEVICE_PFN(vkCreateDescriptorPool)
+VK_DEVICE_PFN(vkCreateDescriptorSetLayout)
+VK_DEVICE_PFN(vkCreateFramebuffer)
+VK_DEVICE_PFN(vkCreateGraphicsPipelines)
+VK_DEVICE_PFN(vkCreateImage)
+VK_DEVICE_PFN(vkCreateImageView)
+VK_DEVICE_PFN(vkCreatePipelineLayout)
+VK_DEVICE_PFN(vkCreateRenderPass)
+VK_DEVICE_PFN(vkCreateSampler)
+VK_DEVICE_PFN(vkCreateShaderModule)
+VK_DEVICE_PFN(vkDestroyBuffer)
+VK_DEVICE_PFN(vkDestroyCommandPool)
+VK_DEVICE_PFN(vkDestroyDescriptorPool)
+VK_DEVICE_PFN(vkDestroyDescriptorSetLayout)
+VK_DEVICE_PFN(vkDestroyFramebuffer)
+VK_DEVICE_PFN(vkDestroyImage)
+VK_DEVICE_PFN(vkDestroyImageView)
+VK_DEVICE_PFN(vkDestroyPipeline)
+VK_DEVICE_PFN(vkDestroyPipelineLayout)
+VK_DEVICE_PFN(vkDestroyRenderPass)
+VK_DEVICE_PFN(vkDestroySampler)
+VK_DEVICE_PFN(vkDestroyShaderModule)
+VK_DEVICE_PFN(vkEndCommandBuffer)
+VK_DEVICE_PFN(vkFreeCommandBuffers)
+VK_DEVICE_PFN(vkFreeMemory)
+VK_DEVICE_PFN(vkGetBufferMemoryRequirements)
+VK_DEVICE_PFN(vkGetDeviceQueue)
+VK_DEVICE_PFN(vkGetImageMemoryRequirements)
+VK_DEVICE_PFN(vkMapMemory)
+VK_DEVICE_PFN(vkQueueSubmit)
+VK_DEVICE_PFN(vkQueueWaitIdle)
+VK_DEVICE_PFN(vkResetDescriptorPool)
+VK_DEVICE_PFN(vkUnmapMemory)
+VK_DEVICE_PFN(vkUpdateDescriptorSets)
+
+#undef VK_INSTANCE_PFN
+#undef VK_INSTANCE_EXT_PFN
+#undef VK_DEVICE_PFN
+#undef VK_DEVICE_EXT_PFN
-- 
2.35.1