require "mspmath";
require "mspgui";
require "sigc++-2.0";
- if_arch "android"
- {
- require "opengles";
- };
- if_arch "!android"
+
+ feature "vulkan" "Use Vulkan instead of OpenGL as backend";
+ if_feature "vulkan"
{
- require "opengl";
+ require "vulkan";
};
- generate "GLEX"
+ if_feature "!vulkan"
{
- in_suffix ".glext";
- out_suffix ".cpp";
- out_suffix ".h";
- command "scripts/extgen.py";
if_arch "android"
{
- argument "gles2";
+ require "opengles";
+ };
+ if_arch "!android"
+ {
+ require "opengl";
+ };
+
+ generate "GLEX"
+ {
+ in_suffix ".glext";
+ out_suffix ".cpp";
+ out_suffix ".h";
+ command "scripts/extgen.py";
+ if_arch "android"
+ {
+ argument "gles2";
+ };
};
};
source "source/resources";
source "source/glsl";
source "source/builders";
- source "source/backends/opengl";
+ if_feature "vulkan"
+ {
+ source "source/backends/vulkan";
+ build_info
+ {
+ incpath "source/backends/vulkan";
+ };
+ };
+ if_feature "!vulkan"
+ {
+ source "source/backends/opengl";
+ build_info
+ {
+ incpath "source/backends/opengl";
+ };
+ };
source "extensions";
source "builtin_data";
source "shaderlib";
incpath "source/animation";
incpath "source/resources";
incpath "source/builders";
- incpath "source/backends/opengl";
standard CXX "c++11";
};
install true;
map "source/glsl" "include/msp/gl/glsl";
map "source/builders" "include/msp/gl";
map "source/backends/opengl" "include/msp/gl";
+ map "source/backends/vulkan" "include/msp/gl";
map "extensions" "include/msp/gl/extensions";
};
};
--- /dev/null
+#ifndef MSP_GL_CAMERA_BACKEND_H_
+#define MSP_GL_CAMERA_BACKEND_H_
+
+namespace Msp {
+namespace GL {
+
+class Matrix;
+
+class OpenGLCamera
+{
+protected:
+ static void adjust_projection_matrix(Matrix &) { }
+};
+
+using CameraBackend = OpenGLCamera;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
namespace Msp {
namespace GL {
+void OpenGLCommands::submit_frame()
+{
+ glFlush();
+}
+
void OpenGLCommands::use_pipeline(const PipelineState *ps)
{
pipeline_state = ps;
OpenGLCommands() = default;
+ void begin_frame(unsigned) { }
+ void submit_frame();
+
void use_pipeline(const PipelineState *);
void clear(const ClearValue *);
void draw(const Batch &);
OpenGLSpirVModule() = default;
OpenGLSpirVModule(OpenGLSpirVModule &&) { };
~OpenGLSpirVModule() = default;
+
+ void create() { }
};
using SpirVModuleBackend = OpenGLSpirVModule;
--- /dev/null
+// No OpenGL-specific declarations
--- /dev/null
+#include "backend.h"
+
+namespace Msp {
+namespace GL {
+
+GraphicsApi get_backend_api()
+{
+ return VULKAN;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#include <stdexcept>
+#include "batch_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_index_type(DataType t)
+{
+ switch(t)
+ {
+ case UNSIGNED_SHORT: return VK_INDEX_TYPE_UINT16;
+ case UNSIGNED_INT: return VK_INDEX_TYPE_UINT32;
+ default: throw invalid_argument("get_vulkan_index_type");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_BATCH_BACKEND_H_
+#define MSP_GL_BATCH_BACKEND_H_
+
+#include "datatype.h"
+#include "primitivetype.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanBatch
+{
+ friend class VulkanCommands;
+
+protected:
+ VulkanBatch(PrimitiveType) { }
+
+ static bool check_restart(bool) { return true; }
+
+ void set_index_type(DataType) { }
+};
+
+using BatchBackend = VulkanBatch;
+
+unsigned get_vulkan_index_type(DataType);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <stdexcept>
+#include "blend.h"
+#include "blend_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_blend_equation(BlendEquation eq)
+{
+ switch(eq)
+ {
+ case ADD: return VK_BLEND_OP_ADD;
+ case SUBTRACT: return VK_BLEND_OP_SUBTRACT;
+ case REVERSE_SUBTRACT: return VK_BLEND_OP_REVERSE_SUBTRACT;
+ case MIN: return VK_BLEND_OP_MIN;
+ case MAX: return VK_BLEND_OP_MAX;
+ default: throw invalid_argument("get_vulkan_blend_equation");
+ }
+}
+
+unsigned get_vulkan_blend_factor(BlendFactor factor)
+{
+ switch(factor)
+ {
+ case ZERO: return VK_BLEND_FACTOR_ZERO;
+ case ONE: return VK_BLEND_FACTOR_ONE;
+ case SRC_COLOR: return VK_BLEND_FACTOR_SRC_COLOR;
+ case ONE_MINUS_SRC_COLOR: return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+ case SRC_ALPHA: return VK_BLEND_FACTOR_SRC_ALPHA;
+ case ONE_MINUS_SRC_ALPHA: return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ case DST_COLOR: return VK_BLEND_FACTOR_DST_COLOR;
+ case ONE_MINUS_DST_COLOR: return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+ case DST_ALPHA: return VK_BLEND_FACTOR_DST_ALPHA;
+ case ONE_MINUS_DST_ALPHA: return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+ case CONSTANT_COLOR: return VK_BLEND_FACTOR_CONSTANT_COLOR;
+ case ONE_MINUS_CONSTANT_COLOR: return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+ case CONSTANT_ALPHA: return VK_BLEND_FACTOR_CONSTANT_ALPHA;
+ case ONE_MINUS_CONSTANT_ALPHA: return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
+ default: throw invalid_argument("get_vulkan_blend_factor");
+ }
+}
+
+unsigned get_vulkan_color_mask(ColorWriteMask mask)
+{
+ unsigned result = 0;
+ if(mask&WRITE_RED)
+ result |= VK_COLOR_COMPONENT_R_BIT;
+ if(mask&WRITE_GREEN)
+ result |= VK_COLOR_COMPONENT_G_BIT;
+ if(mask&WRITE_BLUE)
+ result |= VK_COLOR_COMPONENT_B_BIT;
+ if(mask&WRITE_ALPHA)
+ result |= VK_COLOR_COMPONENT_A_BIT;
+ return result;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_BLEND_BACKEND_H_
+#define MSP_GL_BLEND_BACKEND_H_
+
+#ifndef MSP_GL_BLEND_H_
+#error "blend_backend.h requires blend.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_blend_equation(BlendEquation);
+unsigned get_vulkan_blend_factor(BlendFactor);
+unsigned get_vulkan_color_mask(ColorWriteMask);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "buffer.h"
+#include "buffer_backend.h"
+#include "device.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanBuffer::VulkanBuffer():
+ device(Device::get_current())
+{ }
+
+VulkanBuffer::VulkanBuffer(VulkanBuffer &&other):
+ device(other.device),
+ handle(other.handle),
+ memory_id(other.memory_id),
+ mapped_address(other.mapped_address),
+ debug_name(move(other.debug_name))
+{
+ other.handle = 0;
+ other.memory_id = 0;
+ other.mapped_address = 0;
+}
+
+VulkanBuffer::~VulkanBuffer()
+{
+ if(handle)
+ device.get_destroy_queue().destroy(handle, memory_id);
+}
+
+void VulkanBuffer::allocate()
+{
+ const Buffer &self = *static_cast<const Buffer *>(this);
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkBufferCreateInfo buffer_info = { };
+ buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ buffer_info.size = self.size;
+ buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT|VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT|
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT|VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ vk.CreateBuffer(buffer_info, handle);
+
+ memory_id = device.get_allocator().allocate(handle, (self.usage==STREAMING ? STREAMING_MEMORY : DEVICE_MEMORY));
+
+ if(!debug_name.empty())
+ set_vulkan_object_name();
+}
+
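+/* The data is first copied into a staging buffer obtained from the transfer
+queue; the callback recorded here then copies it into this buffer on the GPU
+and inserts a barrier so that later reads observe the new contents. */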
+void VulkanBuffer::sub_data(size_t off, size_t sz, const void *d)
+{
+ void *staging = device.get_transfer_queue().prepare_transfer(sz, [this, off, sz](VkCommandBuffer cmd_buf, VkBuffer staging_buf, size_t src_off){
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkBufferCopy region = { };
+ region.srcOffset = src_off;
+ region.dstOffset = off;
+ region.size = sz;
+		vk.CmdCopyBuffer(cmd_buf, staging_buf, handle, 1, &region);
+
+ VkBufferMemoryBarrier barrier = { };
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = handle_cast<::VkBuffer>(handle);
+ barrier.offset = off;
+ barrier.size = sz;
+
+ vk.CmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+ 0, 0, 0, 1, &barrier, 0, 0);
+ });
+
+ const char *src = static_cast<const char *>(d);
+ copy(src, src+sz, static_cast<char *>(staging));
+}
+
+bool VulkanBuffer::can_map() const
+{
+ return static_cast<const Buffer *>(this)->usage==STREAMING;
+}
+
+void *VulkanBuffer::map()
+{
+ size_t size = static_cast<const Buffer *>(this)->size;
+ mapped_address = device.get_allocator().map(memory_id, 0, size);
+ return mapped_address;
+}
+
+bool VulkanBuffer::unmap()
+{
+ device.get_allocator().unmap(mapped_address);
+ mapped_address = 0;
+ return true;
+}
+
+void VulkanBuffer::set_debug_name(const string &name)
+{
+#ifdef DEBUG
+ debug_name = name;
+ if(handle)
+ set_vulkan_object_name();
+#else
+ (void)name;
+#endif
+}
+
+void VulkanBuffer::set_vulkan_object_name() const
+{
+#ifdef DEBUG
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkDebugUtilsObjectNameInfoEXT name_info = { };
+ name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+ name_info.objectType = VK_OBJECT_TYPE_BUFFER;
+ name_info.objectHandle = reinterpret_cast<uint64_t>(handle);
+ name_info.pObjectName = debug_name.c_str();
+ vk.SetDebugUtilsObjectName(name_info);
+#endif
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_BUFFER_BACKEND_H_
+#define MSP_GL_BUFFER_BACKEND_H_
+
+#include <msp/core/noncopyable.h>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class VulkanBuffer: public NonCopyable
+{
+ friend class VulkanPipelineState;
+ friend class VulkanVertexSetup;
+
+protected:
+ Device &device;
+ VkBuffer handle = 0;
+ unsigned memory_id = 0;
+ void *mapped_address = 0;
+ std::string debug_name;
+
+ VulkanBuffer();
+ VulkanBuffer(VulkanBuffer &&);
+ ~VulkanBuffer();
+
+ void allocate();
+ void sub_data(std::size_t, std::size_t, const void *);
+
+ bool can_map() const;
+ void *map();
+ bool unmap();
+
+ void set_debug_name(const std::string &);
+ void set_vulkan_object_name() const;
+};
+
+using BufferBackend = VulkanBuffer;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "camera_backend.h"
+#include "matrix.h"
+
+namespace Msp {
+namespace GL {
+
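+// Vulkan clip space differs from OpenGL: Y points down and depth covers
+// [0, 1] instead of [-1, 1], so flip the Y axis and remap depth.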
+void VulkanCamera::adjust_projection_matrix(Matrix &proj_matrix)
+{
+ Matrix adjust;
+ adjust(1, 1) = -1.0f;
+ adjust(2, 2) = 0.5f;
+ adjust(2, 3) = 0.5f;
+ proj_matrix = adjust*proj_matrix;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_CAMERA_BACKEND_H_
+#define MSP_GL_CAMERA_BACKEND_H_
+
+namespace Msp {
+namespace GL {
+
+class Matrix;
+
+class VulkanCamera
+{
+protected:
+ static void adjust_projection_matrix(Matrix &);
+};
+
+using CameraBackend = VulkanCamera;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/core/hash.h>
+#include <msp/graphics/vulkancontext_platform.h>
+#include "batch.h"
+#include "commands_backend.h"
+#include "device.h"
+#include "error.h"
+#include "framebuffer.h"
+#include "frameformat.h"
+#include "pipelinestate.h"
+#include "rect.h"
+#include "semaphore.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanCommands::VulkanCommands():
+ device(Device::get_current())
+{ }
+
+VulkanCommands::~VulkanCommands()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.QueueWaitIdle();
+}
+
+void VulkanCommands::begin_buffer()
+{
+ if(!current_pool)
+ throw invalid_operation("VulkanCommands::begin_buffer");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(!current_pool->in_use)
+ {
+ current_pool->fence.reset();
+ current_pool->in_use = true;
+ }
+
+ VkCommandBufferAllocateInfo alloc_info = { };
+ alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ alloc_info.commandPool = handle_cast<::VkCommandPool>(current_pool->pool);
+ alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ alloc_info.commandBufferCount = 1;
+
+	vk.AllocateCommandBuffers(alloc_info, &current_buffer);
+
+ VkCommandBufferBeginInfo begin_info = { };
+ begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+
+ vk.BeginCommandBuffer(current_buffer, begin_info);
+}
+
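+/* Beginning a render pass first flushes any pending transfers, then picks a
+cached VkRenderPass matching the target's format and clear state. */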
+void VulkanCommands::begin_render_pass(const ClearValue *clear_values)
+{
+ const Framebuffer *target = pipeline_state->get_framebuffer();
+ if(!target)
+ throw invalid_operation("VulkanCommands::begin_render_pass");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(!current_buffer)
+ begin_buffer();
+
+ device.get_transfer_queue().dispatch_transfers(current_buffer);
+
+ // TODO Use proper value for to_present
+ render_pass = device.get_pipeline_cache().get_render_pass(target->get_format(), clear_values, true);
+
+ target->refresh();
+
+ VkRenderPassBeginInfo begin_info = { };
+ begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ begin_info.renderPass = handle_cast<::VkRenderPass>(render_pass);
+ begin_info.framebuffer = handle_cast<::VkFramebuffer>(target->handle);
+
+ const Rect *viewport = pipeline_state->get_viewport();
+ if(viewport)
+ {
+ begin_info.renderArea.offset.x = viewport->left;
+ begin_info.renderArea.offset.y = viewport->bottom;
+ begin_info.renderArea.extent.width = viewport->width;
+ begin_info.renderArea.extent.height = viewport->height;
+ }
+ else
+ {
+ begin_info.renderArea.extent.width = target->get_width();
+ begin_info.renderArea.extent.height = target->get_height();
+ }
+
+	VkClearValue vk_clear_values[FrameFormat::MAX_ATTACHMENTS];
+ if(clear_values)
+ {
+ unsigned i = 0;
+ for(FrameAttachment a: target->get_format())
+ {
+ if(get_attach_point(a)==get_attach_point(DEPTH_ATTACHMENT))
+ vk_clear_values[i].depthStencil.depth = clear_values[i].depth_stencil.depth;
+ else if(get_attach_point(a)==get_attach_point(STENCIL_ATTACHMENT))
+ vk_clear_values[i].depthStencil.stencil = clear_values[i].depth_stencil.stencil;
+ else
+ {
+ vk_clear_values[i].color.float32[0] = clear_values[i].color.r;
+ vk_clear_values[i].color.float32[1] = clear_values[i].color.g;
+ vk_clear_values[i].color.float32[2] = clear_values[i].color.b;
+ vk_clear_values[i].color.float32[3] = clear_values[i].color.a;
+ }
+ ++i;
+ }
+
+ begin_info.clearValueCount = target->get_format().size();
+ begin_info.pClearValues = vk_clear_values;
+ }
+
+ vk.CmdBeginRenderPass(current_buffer, begin_info, VK_SUBPASS_CONTENTS_INLINE);
+}
+
+void VulkanCommands::end_render_pass()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.CmdEndRenderPass(current_buffer);
+ render_pass = 0;
+}
+
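+/* Command pools are cycled per in-flight frame.  Before a pool is reused,
+its fence is waited on to ensure the GPU has finished executing the command
+buffers allocated from it. */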
+void VulkanCommands::begin_frame(unsigned index)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ unsigned pool_index = index%device.get_n_frames_in_flight();
+ if(pool_index>=command_pools.size())
+ {
+ command_pools.reserve(pool_index+1);
+ for(unsigned i=command_pools.size(); i<pool_index+1; ++i)
+ command_pools.emplace_back(device);
+ }
+
+ current_pool = &command_pools[pool_index];
+ if(current_pool->in_use)
+ {
+ current_pool->fence.wait();
+ vk.ResetCommandPool(current_pool->pool, 0);
+ current_pool->in_use = false;
+ }
+}
+
+void VulkanCommands::submit_frame(Semaphore *wait_sem, Semaphore *signal_sem)
+{
+ if(!current_buffer)
+ return;
+
+ const VulkanFunctions &vk = device.get_functions();
+ ::VkSemaphore vk_wait_sem = (wait_sem ? handle_cast<::VkSemaphore>(wait_sem->handle) : 0);
+ ::VkSemaphore vk_signal_sem = (signal_sem ? handle_cast<::VkSemaphore>(signal_sem->handle) : 0);
+ VkPipelineStageFlags wait_stages = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+
+ if(render_pass)
+ end_render_pass();
+
+ vk.EndCommandBuffer(current_buffer);
+
+ VkSubmitInfo submit_info = { };
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.waitSemaphoreCount = (wait_sem ? 1 : 0);
+ submit_info.pWaitSemaphores = &vk_wait_sem;
+ submit_info.pWaitDstStageMask = &wait_stages;
+ submit_info.commandBufferCount = 1;
+	submit_info.pCommandBuffers = handle_cast<::VkCommandBuffer *>(&current_buffer);
+ submit_info.signalSemaphoreCount = (signal_sem ? 1 : 0);
+ submit_info.pSignalSemaphores = &vk_signal_sem;
+
+ vk.QueueSubmit(1, &submit_info, current_pool->fence.handle);
+
+ current_buffer = 0;
+}
+
+void VulkanCommands::use_pipeline(const PipelineState *ps)
+{
+ if(!pipeline_state || !ps || ps->get_framebuffer()!=pipeline_state->get_framebuffer() || ps->get_viewport()!=pipeline_state->get_viewport())
+ if(render_pass)
+ end_render_pass();
+
+ pipeline_state = ps;
+ if(pipeline_state)
+ pipeline_state->refresh();
+}
+
+void VulkanCommands::clear(const ClearValue *values)
+{
+ if(render_pass)
+ throw invalid_operation("VulkanCommands::clear");
+
+ begin_render_pass(values);
+}
+
+void VulkanCommands::draw(const Batch &batch)
+{
+ draw_instanced(batch, 1);
+}
+
+void VulkanCommands::draw_instanced(const Batch &batch, unsigned count)
+{
+ if(!pipeline_state)
+ throw invalid_operation("VulkanCommands::draw_instanced");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(!render_pass)
+ begin_render_pass(0);
+
+ pipeline_state->apply(current_buffer);
+ unsigned first_index = batch.get_offset()/batch.get_index_size();
+ vk.CmdDrawIndexed(current_buffer, batch.size(), count, first_index, 0, 0);
+}
+
+void VulkanCommands::resolve_multisample(Framebuffer &)
+{
+ throw logic_error("VulkanCommands::resolve_multisample is unimplemented");
+}
+
+void VulkanCommands::begin_query(const QueryPool &, unsigned)
+{
+ throw logic_error("VulkanCommands::begin_query is unimplemented");
+}
+
+void VulkanCommands::end_query(const QueryPool &, unsigned)
+{
+ throw logic_error("VulkanCommands::end_query is unimplemented");
+}
+
+
+VulkanCommands::CommandPool::CommandPool(Device &d):
+ device(d),
+ fence(true)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkCommandPoolCreateInfo pool_info = { };
+ pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ pool_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
+ pool_info.queueFamilyIndex = device.get_context().get_private().graphics_queue_family;
+
+ vk.CreateCommandPool(pool_info, pool);
+}
+
+VulkanCommands::CommandPool::CommandPool(CommandPool &&other):
+ device(other.device),
+ pool(other.pool),
+ fence(move(other.fence)),
+ in_use(other.in_use)
+{
+ other.pool = 0;
+}
+
+VulkanCommands::CommandPool::~CommandPool()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(pool)
+ vk.DestroyCommandPool(pool);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_COMMANDS_BACKEND_H_
+#define MSP_GL_COMMANDS_BACKEND_H_
+
+#include <vector>
+#include "fence.h"
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Batch;
+union ClearValue;
+class Device;
+class Framebuffer;
+class PipelineState;
+class QueryPool;
+class Semaphore;
+class SwapChain;
+
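+/* A frame is expected to be driven roughly as follows (a sketch, not from
+the original source; the semaphore names are illustrative):
+
+     commands.begin_frame(frame_index);
+     commands.use_pipeline(&pipeline_state);
+     commands.clear(clear_values);
+     commands.draw(batch);
+     commands.submit_frame(&image_available, &render_finished);
+*/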
+class VulkanCommands
+{
+protected:
+ struct CommandPool
+ {
+ Device &device;
+ VkCommandPool pool = 0;
+ Fence fence;
+ bool in_use = false;
+
+ CommandPool(Device &);
+ CommandPool(CommandPool &&);
+ ~CommandPool();
+ };
+
+ Device &device;
+ std::vector<CommandPool> command_pools;
+ CommandPool *current_pool = 0;
+ VkCommandBuffer current_buffer = 0;
+ const PipelineState *pipeline_state = 0;
+ VkRenderPass render_pass = 0;
+
+ VulkanCommands();
+ ~VulkanCommands();
+
+ void begin_buffer();
+ void begin_render_pass(const ClearValue *);
+ void end_render_pass();
+
+ void begin_frame(unsigned);
+ void submit_frame();
+ void submit_frame(Semaphore *, Semaphore *);
+
+ void use_pipeline(const PipelineState *);
+ void clear(const ClearValue *);
+ void draw(const Batch &);
+ void draw_instanced(const Batch &, unsigned);
+ void resolve_multisample(Framebuffer &);
+
+ void begin_query(const QueryPool &, unsigned);
+ void end_query(const QueryPool &, unsigned);
+};
+
+using CommandsBackend = VulkanCommands;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "datatype.h"
+
+namespace Msp {
+namespace GL {
+
+void require_type(DataType)
+{ }
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+// No Vulkan-specific declarations
--- /dev/null
+#include "destroyqueue.h"
+#include "device.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+DestroyQueue::DestroyQueue(Device &d):
+ device(d)
+{ }
+
+DestroyQueue::~DestroyQueue()
+{
+ while(!queue.empty())
+ tick();
+}
+
+void DestroyQueue::destroy(VkBuffer handle, unsigned mem_id)
+{
+ destroy<VkBuffer, &VulkanFunctions::DestroyBuffer>(handle, mem_id);
+}
+
+void DestroyQueue::destroy(VkFence handle)
+{
+ destroy<VkFence, &VulkanFunctions::DestroyFence>(handle);
+}
+
+void DestroyQueue::destroy(VkFramebuffer handle)
+{
+ destroy<VkFramebuffer, &VulkanFunctions::DestroyFramebuffer>(handle);
+}
+
+void DestroyQueue::destroy(VkImageView handle)
+{
+ destroy<VkImageView, &VulkanFunctions::DestroyImageView>(handle);
+}
+
+void DestroyQueue::destroy(VkSemaphore handle)
+{
+ destroy<VkSemaphore, &VulkanFunctions::DestroySemaphore>(handle);
+}
+
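+// Destruction is deferred by MAX_FRAMES_IN_FLIGHT frames so that any frame
+// still in flight on the GPU can finish using the handle first.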
+template<typename T, void (VulkanFunctions::*destroy_func)(T) const>
+void DestroyQueue::destroy(T handle, unsigned mem_id)
+{
+ Entry entry;
+ entry.handle = handle;
+ entry.destroy_func = [](const VulkanFunctions &vk, void *h){ (vk.*destroy_func)(static_cast<T>(h)); };
+ entry.memory_id = mem_id;
+ entry.on_frame = current_frame+MAX_FRAMES_IN_FLIGHT;
+ queue.push_back(entry);
+}
+
+void DestroyQueue::tick()
+{
+ const VulkanFunctions &vk = device.get_functions();
+ MemoryAllocator &allocator = device.get_allocator();
+
+ ++current_frame;
+ while(!queue.empty() && current_frame>=queue.front().on_frame)
+ {
+ const Entry &e = queue.front();
+ e.destroy_func(vk, e.handle);
+ if(e.memory_id)
+ allocator.release(e.memory_id);
+ queue.pop_front();
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_DESTROYQUEUE_H_
+#define MSP_GL_VULKAN_DESTROYQUEUE_H_
+
+#include <deque>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+class VulkanFunctions;
+
+class DestroyQueue
+{
+private:
+ struct Entry
+ {
+ void *handle = 0;
+ void (*destroy_func)(const VulkanFunctions &, void *) = 0;
+ unsigned memory_id = 0;
+ unsigned on_frame = 0;
+ };
+
+ Device &device;
+ std::deque<Entry> queue;
+ unsigned current_frame = 0;
+
+public:
+ DestroyQueue(Device &);
+ ~DestroyQueue();
+
+ void destroy(VkBuffer, unsigned);
+ void destroy(VkFence);
+ void destroy(VkFramebuffer);
+ void destroy(VkImageView);
+ void destroy(VkSemaphore);
+
+private:
+ template<typename T, void (VulkanFunctions::*)(T) const>
+ void destroy(T, unsigned = 0);
+
+public:
+ void tick();
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/graphics/vulkancontext_platform.h>
+#include "device.h"
+#include "device_backend.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanDevice::VulkanDevice(Graphics::Window &wnd, const Graphics::VulkanOptions &opts):
+ context(wnd, opts),
+ device(handle_cast<VkDevice>(context.get_private().device)),
+ graphics_queue(handle_cast<VkQueue>(context.get_private().graphics_queue)),
+ functions(new VulkanFunctions(context)),
+ allocator(*static_cast<Device *>(this)),
+ destroy_queue(*static_cast<Device *>(this)),
+ transfer_queue(*static_cast<Device *>(this)),
+ pipeline_cache(*static_cast<Device *>(this))
+{ }
+
+// Cause the destructor of RefPtr<VulkanFunctions> to be emitted here
+VulkanDevice::~VulkanDevice()
+{ }
+
+Graphics::VulkanOptions VulkanDevice::create_default_options()
+{
+ Graphics::VulkanOptions opts;
+#ifdef DEBUG
+ opts.enable_validation = true;
+ opts.enable_debug_report = true;
+#endif
+ return opts;
+}
+
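+/* VkPhysicalDeviceProperties::apiVersion packs the major version in bits
+22..28 and the minor version in bits 12..21. */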
+void VulkanDevice::fill_info()
+{
+ DeviceInfo &info = static_cast<Device *>(this)->info;
+
+ VkPhysicalDeviceProperties props;
+ functions->GetPhysicalDeviceProperties(props);
+
+ info.api_version.major = (props.apiVersion>>22)&0x7F;
+ info.api_version.minor = (props.apiVersion>>12)&0x3FF;
+
+ DeviceLimits &limits = info.limits;
+ limits.max_clip_planes = props.limits.maxClipDistances;
+ limits.max_vertex_attributes = props.limits.maxVertexInputAttributes;
+ limits.max_texture_bindings = props.limits.maxDescriptorSetSampledImages;
+ limits.max_color_attachments = props.limits.maxColorAttachments;
+ unsigned samples = props.limits.framebufferColorSampleCounts&props.limits.framebufferDepthSampleCounts&props.limits.framebufferStencilSampleCounts;
+ if(samples&VK_SAMPLE_COUNT_64_BIT)
+ limits.max_samples = 64;
+ else if(samples&VK_SAMPLE_COUNT_32_BIT)
+ limits.max_samples = 32;
+ else if(samples&VK_SAMPLE_COUNT_16_BIT)
+ limits.max_samples = 16;
+ else if(samples&VK_SAMPLE_COUNT_8_BIT)
+ limits.max_samples = 8;
+ limits.max_uniform_bindings = props.limits.maxDescriptorSetUniformBuffers;
+ limits.uniform_buffer_alignment = props.limits.minUniformBufferOffsetAlignment;
+ limits.max_anisotropy = props.limits.maxSamplerAnisotropy;
+
+ info.glsl_features = SL::Features::from_api_version(info.api, info.api_version);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_DEVICE_BACKEND_H_
+#define MSP_GL_DEVICE_BACKEND_H_
+
+#include <msp/core/noncopyable.h>
+#include <msp/graphics/vulkancontext.h>
+#include "destroyqueue.h"
+#include "handles.h"
+#include "memoryallocator.h"
+#include "pipelinecache.h"
+#include "transferqueue.h"
+
+namespace Msp {
+namespace GL {
+
+struct VulkanFunctions;
+
+constexpr unsigned MAX_FRAMES_IN_FLIGHT = 3;
+
+class VulkanDevice: public NonCopyable
+{
+protected:
+ Graphics::VulkanContext context;
+ VkDevice device;
+ VkQueue graphics_queue;
+ RefPtr<VulkanFunctions> functions;
+ MemoryAllocator allocator;
+ DestroyQueue destroy_queue;
+ TransferQueue transfer_queue;
+ PipelineCache pipeline_cache;
+ unsigned n_frames_in_flight = 3;
+
+ VulkanDevice(Graphics::Window &, const Graphics::VulkanOptions &);
+ ~VulkanDevice();
+
+ static Graphics::VulkanOptions create_default_options();
+
+ void fill_info();
+
+ Graphics::VulkanContext &get_context() { return context; }
+
+public:
+ const VulkanFunctions &get_functions() const { return *functions; }
+ MemoryAllocator &get_allocator() { return allocator; }
+ DestroyQueue &get_destroy_queue() { return destroy_queue; }
+ TransferQueue &get_transfer_queue() { return transfer_queue; }
+ PipelineCache &get_pipeline_cache() { return pipeline_cache; }
+ unsigned get_n_frames_in_flight() const { return n_frames_in_flight; }
+};
+
+using DeviceBackend = VulkanDevice;
+using DeviceOptions = Graphics::VulkanOptions;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <limits>
+#include "device.h"
+#include "fence.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+Fence::Fence(bool create_signaled):
+ device(Device::get_current())
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkFenceCreateInfo fence_info = { };
+ fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ fence_info.flags = (create_signaled ? VK_FENCE_CREATE_SIGNALED_BIT : 0);
+
+ vk.CreateFence(fence_info, handle);
+}
+
+Fence::Fence(Fence &&other):
+ device(other.device),
+ handle(other.handle)
+{
+ other.handle = 0;
+}
+
+Fence::~Fence()
+{
+ if(handle)
+ device.get_destroy_queue().destroy(handle);
+}
+
+bool Fence::get_status()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ Result result = vk.GetFenceStatus(handle);
+ if(result==VK_NOT_READY)
+ return false;
+ else
+ result.check();
+
+ return true;
+}
+
+void Fence::reset()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.ResetFences(1, &handle);
+}
+
+void Fence::wait()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.WaitForFences(1, &handle, VK_TRUE, numeric_limits<uint64_t>::max());
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_FENCE_H_
+#define MSP_GL_VULKAN_FENCE_H_
+
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class Fence
+{
+ friend class VulkanCommands;
+
+private:
+ Device &device;
+ VkFence handle;
+
+public:
+ Fence(bool = false);
+ Fence(Fence &&);
+ ~Fence();
+
+ bool get_status();
+ void reset();
+ void wait();
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <stdexcept>
+#include <msp/strings/format.h>
+#include "device.h"
+#include "framebuffer.h"
+#include "framebuffer_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanFramebuffer::VulkanFramebuffer(bool):
+ device(Device::get_current())
+{ }
+
+VulkanFramebuffer::VulkanFramebuffer(VulkanFramebuffer &&other):
+ device(other.device),
+ handle(other.handle),
+ debug_name(move(other.debug_name))
+{
+ other.handle = 0;
+}
+
+VulkanFramebuffer::~VulkanFramebuffer()
+{
+ if(handle)
+ device.get_destroy_queue().destroy(handle);
+}
+
+bool VulkanFramebuffer::is_format_supported(const FrameFormat &fmt)
+{
+ const VulkanFunctions &vk = device.get_functions();
+ for(FrameAttachment a: fmt)
+ {
+ PixelFormat pf = get_attachment_pixelformat(a);
+ PixelComponents comp = get_components(pf);
+ VkFormatProperties props;
+ vk.GetPhysicalDeviceFormatProperties(static_cast<VkFormat>(get_vulkan_pixelformat(pf)), props);
+ if(comp==DEPTH_COMPONENT || comp==STENCIL_INDEX)
+ {
+ if(!(props.optimalTilingFeatures&VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+ return false;
+ }
+ else if(!(props.optimalTilingFeatures&VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT))
+ return false;
+ }
+
+ return true;
+}
+
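+/* The framebuffer object is recreated when attachments change.  The old
+handle is pushed onto the destroy queue so frames still in flight can keep
+using it. */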
+void VulkanFramebuffer::update(unsigned) const
+{
+ const Framebuffer &self = *static_cast<const Framebuffer *>(this);
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(handle)
+ device.get_destroy_queue().destroy(handle);
+
+ VkImageView vk_attachments[FrameFormat::MAX_ATTACHMENTS] = { };
+ unsigned i = 0;
+ for(const Framebuffer::Attachment &a: self.attachments)
+ {
+ if(a.tex->view_type!=VK_IMAGE_VIEW_TYPE_2D || a.level || a.layer)
+ throw logic_error("Unimplemented texture type in VulkanFramebuffer::update");
+ vk_attachments[i++] = a.tex->view_handle;
+ }
+
+ VkRenderPass render_pass = device.get_pipeline_cache().get_render_pass(self.format, false, false);
+
+ VkFramebufferCreateInfo framebuffer_info = { };
+ framebuffer_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ framebuffer_info.renderPass = handle_cast<::VkRenderPass>(render_pass);
+ framebuffer_info.attachmentCount = self.format.size();
+ framebuffer_info.pAttachments = handle_cast<::VkImageView *>(vk_attachments);
+ framebuffer_info.width = self.width;
+ framebuffer_info.height = self.height;
+ framebuffer_info.layers = 1;
+
+ vk.CreateFramebuffer(framebuffer_info, handle);
+
+ if(!debug_name.empty())
+ set_vulkan_object_name();
+}
+
+void VulkanFramebuffer::set_debug_name(const string &name)
+{
+#ifdef DEBUG
+ debug_name = name;
+ if(handle)
+ set_vulkan_object_name();
+#else
+ (void)name;
+#endif
+}
+
+void VulkanFramebuffer::set_vulkan_object_name() const
+{
+#ifdef DEBUG
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkDebugUtilsObjectNameInfoEXT name_info = { };
+ name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+ name_info.objectType = VK_OBJECT_TYPE_FRAMEBUFFER;
+ name_info.objectHandle = reinterpret_cast<uint64_t>(handle);
+ name_info.pObjectName = debug_name.c_str();
+ vk.SetDebugUtilsObjectName(name_info);
+#endif
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_FRAMEBUFFER_BACKEND_H_
+#define MSP_GL_FRAMEBUFFER_BACKEND_H_
+
+#include <string>
+#include "frameformat.h"
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class VulkanFramebuffer: public NonCopyable
+{
+ friend class VulkanCommands;
+ friend class VulkanPipelineState;
+
+protected:
+ Device &device;
+ mutable VkFramebuffer handle = 0;
+ std::string debug_name;
+
+ VulkanFramebuffer(bool);
+ VulkanFramebuffer(VulkanFramebuffer &&);
+ ~VulkanFramebuffer();
+
+ bool is_format_supported(const FrameFormat &);
+ static void require_layered() { }
+
+ void update(unsigned) const;
+ void require_complete() const { }
+
+ void set_debug_name(const std::string &);
+ void set_vulkan_object_name() const;
+};
+
+using FramebufferBackend = VulkanFramebuffer;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <stdexcept>
+#include "frameformat.h"
+#include "frameformat_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_samples(unsigned samples)
+{
+ switch(samples)
+ {
+ case 1: return VK_SAMPLE_COUNT_1_BIT;
+ case 2: return VK_SAMPLE_COUNT_2_BIT;
+ case 4: return VK_SAMPLE_COUNT_4_BIT;
+ case 8: return VK_SAMPLE_COUNT_8_BIT;
+ case 16: return VK_SAMPLE_COUNT_16_BIT;
+ case 32: return VK_SAMPLE_COUNT_32_BIT;
+ case 64: return VK_SAMPLE_COUNT_64_BIT;
+ default: throw invalid_argument("get_vulkan_samples");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_FRAMEFORMAT_BACKEND_H_
+#define MSP_GL_FRAMEFORMAT_BACKEND_H_
+
+#ifndef MSP_GL_FRAMEFORMAT_H_
+#error "frameformat_backend.h requires frameformat.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_samples(unsigned);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#ifndef MSP_GL_VULKAN_HANDLES_H_
+#define MSP_GL_VULKAN_HANDLES_H_
+
+namespace Msp {
+namespace GL {
+
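+/* Opaque stand-ins for Vulkan handle types, declared as pointers to
+incomplete types.  This lets headers pass handles around without pulling in
+the real Vulkan headers; handle_cast converts to the actual types where
+needed. */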
+struct VkBuffer_Type;
+using VkBuffer = VkBuffer_Type *;
+
+struct VkCommandBuffer_Type;
+using VkCommandBuffer = VkCommandBuffer_Type *;
+
+struct VkCommandPool_Type;
+using VkCommandPool = VkCommandPool_Type *;
+
+struct VkDescriptorPool_Type;
+using VkDescriptorPool = VkDescriptorPool_Type *;
+
+struct VkDescriptorSet_Type;
+using VkDescriptorSet = VkDescriptorSet_Type *;
+
+struct VkDescriptorSetLayout_Type;
+using VkDescriptorSetLayout = VkDescriptorSetLayout_Type *;
+
+struct VkDevice_Type;
+using VkDevice = VkDevice_Type *;
+
+struct VkDeviceMemory_Type;
+using VkDeviceMemory = VkDeviceMemory_Type *;
+
+struct VkFence_Type;
+using VkFence = VkFence_Type *;
+
+struct VkFramebuffer_Type;
+using VkFramebuffer = VkFramebuffer_Type *;
+
+struct VkImage_Type;
+using VkImage = VkImage_Type *;
+
+struct VkImageView_Type;
+using VkImageView = VkImageView_Type *;
+
+struct VkPhysicalDevice_Type;
+using VkPhysicalDevice = VkPhysicalDevice_Type *;
+
+struct VkPipeline_Type;
+using VkPipeline = VkPipeline_Type *;
+
+struct VkPipelineCache_Type;
+using VkPipelineCache = VkPipelineCache_Type *;
+
+struct VkPipelineLayout_Type;
+using VkPipelineLayout = VkPipelineLayout_Type *;
+
+struct VkQueue_Type;
+using VkQueue = VkQueue_Type *;
+
+struct VkRenderPass_Type;
+using VkRenderPass = VkRenderPass_Type *;
+
+struct VkSampler_Type;
+using VkSampler = VkSampler_Type *;
+
+struct VkSemaphore_Type;
+using VkSemaphore = VkSemaphore_Type *;
+
+struct VkShaderModule_Type;
+using VkShaderModule = VkShaderModule_Type *;
+
+struct VkSurface_Type;
+using VkSurface = VkSurface_Type *;
+
+struct VkSwapchain_Type;
+using VkSwapchain = VkSwapchain_Type *;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/core/algorithm.h>
+#include <msp/graphics/vulkancontext_platform.h>
+#include "device.h"
+#include "error.h"
+#include "memoryallocator.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
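+/* The constructor classifies each Vulkan memory type up front: device-local
+plus host-visible memory is used for streaming, device-local only for static
+device resources, and host-visible only for staging. */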
+MemoryAllocator::MemoryAllocator(Device &d):
+ device(d),
+ phys_device(handle_cast<VkPhysicalDevice>(device.get_context().get_private().physical_device))
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkPhysicalDeviceMemoryProperties mem_props;
+ vk.GetPhysicalDeviceMemoryProperties(mem_props);
+
+ const VkMemoryPropertyFlags host_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ memory_types.reserve(mem_props.memoryTypeCount);
+ for(unsigned i=0; i<mem_props.memoryTypeCount; ++i)
+ {
+ VkMemoryPropertyFlags flags = mem_props.memoryTypes[i].propertyFlags;
+ MemoryType type = UNKNOWN_MEMORY;
+ if(flags&VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+ {
+ if((flags&host_flags)==host_flags)
+ type = STREAMING_MEMORY;
+ else
+ type = DEVICE_MEMORY;
+ }
+ else if((flags&host_flags)==host_flags)
+ type = STAGING_MEMORY;
+ memory_types.push_back(type);
+ }
+}
+
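+// When no exact match exists, device and staging requests fall back to
+// streaming memory, which is both device-local and host-visible.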
+unsigned MemoryAllocator::find_memory_type_index(unsigned mask, MemoryType type)
+{
+ for(unsigned i=0; i<memory_types.size(); ++i)
+ if((mask&(1<<i)) && memory_types[i]==type)
+ return i;
+ if(type==DEVICE_MEMORY || type==STAGING_MEMORY)
+ return find_memory_type_index(mask, STREAMING_MEMORY);
+ throw runtime_error("Unable to find suitable memory type");
+}
+
+unsigned MemoryAllocator::allocate(size_t size, unsigned type_bits, MemoryType type)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkMemoryAllocateInfo alloc_info = { };
+ alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ alloc_info.allocationSize = size;
+ alloc_info.memoryTypeIndex = find_memory_type_index(type_bits, type);
+
+ Allocation alloc;
+ vk.AllocateMemory(alloc_info, alloc.memory);
+
+ alloc.type = type;
+ alloc.size = size;
+ allocations.push_back(alloc);
+
+ return allocations.size();
+}
+
+MemoryAllocator::Allocation &MemoryAllocator::get_allocation(unsigned id)
+{
+ return allocations[id-1];
+}
+
+const MemoryAllocator::Allocation &MemoryAllocator::get_allocation(unsigned id) const
+{
+ return allocations[id-1];
+}
+
+unsigned MemoryAllocator::allocate(VkBuffer buffer, MemoryType type)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkMemoryRequirements requirements;
+ vk.GetBufferMemoryRequirements(buffer, requirements);
+
+ unsigned id = allocate(requirements.size, requirements.memoryTypeBits, type);
+
+ vk.BindBufferMemory(buffer, get_allocation(id).memory, 0);
+
+ return id;
+}
+
+void MemoryAllocator::release(unsigned id)
+{
+ Allocation &alloc = get_allocation(id);
+ if(!alloc.memory)
+ throw invalid_operation("MemoryAllocator::release");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+	vk.FreeMemory(alloc.memory);
+	alloc.memory = 0;
+}
+
+size_t MemoryAllocator::get_allocation_size(unsigned id) const
+{
+ return get_allocation(id).size;
+}
+
+void *MemoryAllocator::map(unsigned id, size_t offset, size_t size)
+{
+ Allocation &alloc = get_allocation(id);
+ if(alloc.mapped_address)
+ throw invalid_operation("MemoryAllocator::map");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.MapMemory(alloc.memory, offset, size, 0, &alloc.mapped_address);
+
+ return alloc.mapped_address;
+}
+
+void MemoryAllocator::unmap(void *ptr)
+{
+ auto i = find_member(allocations, ptr, &Allocation::mapped_address);
+ if(i==allocations.end())
+ throw invalid_operation("MemoryAllocator::unmap");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.UnmapMemory(i->memory);
+ i->mapped_address = 0;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_MEMORYALLOCATOR_H_
+#define MSP_GL_VULKAN_MEMORYALLOCATOR_H_
+
+#include <vector>
+#include <msp/graphics/vulkancontext.h>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+enum MemoryType
+{
+ UNKNOWN_MEMORY,
+ DEVICE_MEMORY,
+ STAGING_MEMORY,
+ STREAMING_MEMORY
+};
+
+class MemoryAllocator
+{
+private:
+ struct Allocation
+ {
+ VkDeviceMemory memory = 0;
+ MemoryType type = UNKNOWN_MEMORY;
+ std::size_t size = 0;
+ void *mapped_address = 0;
+ };
+
+ Device &device;
+ VkPhysicalDevice phys_device;
+ std::vector<MemoryType> memory_types;
+ std::vector<Allocation> allocations;
+
+public:
+ MemoryAllocator(Device &);
+
+private:
+ unsigned find_memory_type_index(unsigned, MemoryType);
+ unsigned allocate(std::size_t, unsigned, MemoryType);
+ Allocation &get_allocation(unsigned);
+ const Allocation &get_allocation(unsigned) const;
+
+public:
+ unsigned allocate(VkBuffer, MemoryType);
+ void release(unsigned);
+
+ std::size_t get_allocation_size(unsigned) const;
+
+ void *map(unsigned, std::size_t, std::size_t);
+ void unmap(void *);
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <stdexcept>
+#include "device.h"
+#include "module.h"
+#include "module_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanSpirVModule::VulkanSpirVModule():
+ device(Device::get_current())
+{ }
+
+VulkanSpirVModule::VulkanSpirVModule(VulkanSpirVModule &&other):
+ device(other.device),
+ handle(other.handle)
+{
+ other.handle = 0;
+}
+
+VulkanSpirVModule::~VulkanSpirVModule()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(handle)
+ vk.DestroyShaderModule(handle);
+}
+
+void VulkanSpirVModule::create()
+{
+ const vector<uint32_t> &code = static_cast<const SpirVModule *>(this)->code;
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkShaderModuleCreateInfo module_info = { };
+ module_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ module_info.codeSize = code.size()*4;
+ module_info.pCode = code.data();
+
+ vk.CreateShaderModule(module_info, handle);
+}
+
+
+unsigned get_vulkan_stage(unsigned stage)
+{
+ switch(stage)
+ {
+ case SpirVModule::VERTEX: return VK_SHADER_STAGE_VERTEX_BIT;
+ case SpirVModule::GEOMETRY: return VK_SHADER_STAGE_GEOMETRY_BIT;
+ case SpirVModule::FRAGMENT: return VK_SHADER_STAGE_FRAGMENT_BIT;
+ default: throw invalid_argument("get_vulkan_stage");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_MODULE_BACKEND_H_
+#define MSP_GL_MODULE_BACKEND_H_
+
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class VulkanSpirVModule
+{
+ friend class VulkanProgram;
+
+protected:
+ Device &device;
+ VkShaderModule handle = 0;
+
+ VulkanSpirVModule();
+ VulkanSpirVModule(VulkanSpirVModule &&);
+ ~VulkanSpirVModule();
+
+ void create();
+};
+
+using SpirVModuleBackend = VulkanSpirVModule;
+
+unsigned get_vulkan_stage(unsigned);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/core/hash.h>
+#include "blend.h"
+#include "depthtest.h"
+#include "device.h"
+#include "framebuffer.h"
+#include "pipelinecache.h"
+#include "pipelinestate.h"
+#include "stenciltest.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+PipelineCache::PipelineCache(Device &d):
+ device(d)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkDescriptorPoolSize pool_sizes[2] = { };
+ pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ pool_sizes[0].descriptorCount = 10000;
+ pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ pool_sizes[1].descriptorCount = 10000;
+
+ VkDescriptorPoolCreateInfo pool_info = { };
+ pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ pool_info.maxSets = 10000;
+ pool_info.poolSizeCount = 2;
+ pool_info.pPoolSizes = pool_sizes;
+
+ vk.CreateDescriptorPool(pool_info, descriptor_pool);
+}
+
+PipelineCache::~PipelineCache()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ for(const auto &kvp: render_passes)
+ vk.DestroyRenderPass(kvp.second);
+ for(const auto &kvp: pipelines)
+ vk.DestroyPipeline(kvp.second);
+ vk.DestroyDescriptorPool(descriptor_pool);
+}
+
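+/* Render passes are cached, keyed by the attachment formats together with
+the clear and present flags, so each unique combination is created once. */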
+VkRenderPass PipelineCache::get_render_pass(const FrameFormat &format, bool is_cleared, bool to_present)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ uint64_t key = hash<64>(static_cast<uint8_t>(is_cleared | (to_present*2)));
+ for(FrameAttachment a: format)
+ key = hash_update<64>(key, a);
+
+ auto j = render_passes.find(key);
+ if(j!=render_passes.end())
+ return j->second;
+
+ VkAttachmentDescription attachments[FrameFormat::MAX_ATTACHMENTS] = { };
+ VkAttachmentReference color_refs[FrameFormat::MAX_ATTACHMENTS] = { };
+ VkAttachmentReference depth_stencil_ref = { };
+ depth_stencil_ref.attachment = VK_ATTACHMENT_UNUSED;
+
+ VkSampleCountFlagBits vk_samples = static_cast<VkSampleCountFlagBits>(get_vulkan_samples(format.get_samples()));
+
+ unsigned i = 0;
+ unsigned color_count = 0;
+ for(FrameAttachment a: format)
+ {
+ attachments[i].format = static_cast<VkFormat>(get_vulkan_pixelformat(get_attachment_pixelformat(a)));
+ attachments[i].samples = vk_samples;
+ attachments[i].loadOp = (is_cleared ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD);
+ attachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[i].initialLayout = (is_cleared ? VK_IMAGE_LAYOUT_UNDEFINED : VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+ attachments[i].finalLayout = (to_present ? VK_IMAGE_LAYOUT_PRESENT_SRC_KHR : VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+ unsigned attach_pt = get_attach_point(a);
+ if(attach_pt==get_attach_point(COLOR_ATTACHMENT))
+ {
+ color_refs[color_count].attachment = i;
+ color_refs[color_count].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ ++color_count;
+ }
+ else if(attach_pt==get_attach_point(DEPTH_ATTACHMENT))
+ {
+ depth_stencil_ref.attachment = i;
+ depth_stencil_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ }
+
+ ++i;
+ }
+
+ VkSubpassDescription subpass = { };
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.colorAttachmentCount = color_count;
+ subpass.pColorAttachments = color_refs;
+ subpass.pDepthStencilAttachment = &depth_stencil_ref;
+
+ VkRenderPassCreateInfo render_pass_info = { };
+ render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ render_pass_info.attachmentCount = format.size();
+ render_pass_info.pAttachments = attachments;
+ render_pass_info.subpassCount = 1;
+ render_pass_info.pSubpasses = &subpass;
+
+ VkRenderPass render_pass;
+ vk.CreateRenderPass(render_pass_info, render_pass);
+
+ render_passes.insert(make_pair(key, render_pass));
+
+ return render_pass;
+}
+
+VkPipeline PipelineCache::get_pipeline(const PipelineState &ps)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ uint64_t key = ps.compute_hash();
+ auto i = pipelines.find(key);
+ if(i!=pipelines.end())
+ return i->second;
+
+ vector<char> buffer;
+ ps.fill_creation_info(buffer);
+ const VkGraphicsPipelineCreateInfo *creation_info = reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(buffer.data());
+
+ VkPipeline pipeline;
+ vk.CreateGraphicsPipelines(0, 1, creation_info, &pipeline);
+
+ pipelines.insert(make_pair(key, pipeline));
+
+ return pipeline;
+}
+
+VkDescriptorSet PipelineCache::get_descriptor_set(const PipelineState &ps, unsigned index)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ uint64_t key = ps.compute_descriptor_set_hash(index);
+ auto i = descriptor_sets.find(key);
+ if(i!=descriptor_sets.end())
+ return i->second;
+
+ VkDescriptorSetLayout layout = ps.get_descriptor_set_layout(index);
+
+ VkDescriptorSetAllocateInfo alloc_info = { };
+ alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ alloc_info.descriptorPool = handle_cast<::VkDescriptorPool>(descriptor_pool);
+ alloc_info.descriptorSetCount = 1;
+ alloc_info.pSetLayouts = handle_cast<::VkDescriptorSetLayout *>(&layout);
+
+ VkDescriptorSet desc_set;
+ vk.AllocateDescriptorSets(alloc_info, &desc_set);
+
+ vector<char> buffer;
+ unsigned n_writes = ps.fill_descriptor_writes(index, buffer);
+ VkWriteDescriptorSet *writes = reinterpret_cast<VkWriteDescriptorSet *>(buffer.data());
+ for(unsigned j=0; j<n_writes; ++j)
+ writes[j].dstSet = handle_cast<::VkDescriptorSet>(desc_set);
+
+ vk.UpdateDescriptorSets(n_writes, writes, 0, 0);
+
+ descriptor_sets.insert(make_pair(key, desc_set));
+
+ return desc_set;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_PIPELINECACHE_H_
+#define MSP_GL_VULKAN_PIPELINECACHE_H_
+
+#include <cstdint>
+#include <map>
+#include "frameformat.h"
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+class PipelineState;
+
+class PipelineCache
+{
+private:
+ Device &device;
+ VkDescriptorPool descriptor_pool;
+ std::map<std::uint64_t, VkRenderPass> render_passes;
+ std::map<std::uint64_t, VkPipeline> pipelines;
+ std::map<std::uint64_t, VkDescriptorSet> descriptor_sets;
+
+public:
+ PipelineCache(Device &);
+ PipelineCache(PipelineCache &&);
+ ~PipelineCache();
+
+ VkRenderPass get_render_pass(const FrameFormat &, bool, bool);
+ VkPipeline get_pipeline(const PipelineState &);
+ VkDescriptorSet get_descriptor_set(const PipelineState &, unsigned);
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/core/hash.h>
+#include "batch.h"
+#include "blend.h"
+#include "buffer.h"
+#include "depthtest.h"
+#include "device.h"
+#include "framebuffer.h"
+#include "pipelinestate.h"
+#include "pipelinestate_backend.h"
+#include "program.h"
+#include "rect.h"
+#include "sampler.h"
+#include "stenciltest.h"
+#include "structurebuilder.h"
+#include "texture.h"
+#include "uniformblock.h"
+#include "vertexsetup.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanPipelineState::VulkanPipelineState():
+ device(Device::get_current())
+{ }
+
+VulkanPipelineState::VulkanPipelineState(VulkanPipelineState &&other):
+ device(other.device),
+ handle(other.handle)
+{ }
+
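+/* Refresh only what the change mask indicates: the pipeline handle when
+shader or fixed-function state changed, and individual descriptor sets when
+their uniform blocks changed. */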
+void VulkanPipelineState::update() const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+
+ constexpr unsigned pipeline_mask = PipelineState::SHPROG|PipelineState::VERTEX_SETUP|PipelineState::FACE_CULL|
+ PipelineState::DEPTH_TEST|PipelineState::STENCIL_TEST|PipelineState::BLEND|PipelineState::PRIMITIVE_TYPE;
+ if(self.changes&pipeline_mask)
+ handle = device.get_pipeline_cache().get_pipeline(self);
+
+ if(self.changes&(PipelineState::UNIFORMS|PipelineState::TEXTURES))
+ {
+ unsigned changed_sets = 0;
+ for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
+ if(u.changed && u.binding>=0)
+ {
+ changed_sets |= 1<<(u.binding>>20);
+ u.changed = false;
+ }
+
+ descriptor_set_handles.resize(self.shprog->get_n_descriptor_sets());
+ for(unsigned i=0; i<descriptor_set_handles.size(); ++i)
+ if(changed_sets&(1<<i))
+ descriptor_set_handles[i] = device.get_pipeline_cache().get_descriptor_set(self, i);
+ }
+
+ self.changes = 0;
+}
+
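+/* The hash covers every piece of state that affects pipeline creation and
+serves as the PipelineCache lookup key, so states hashing equal share a
+VkPipeline. */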
+uint64_t VulkanPipelineState::compute_hash() const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+ const FrameFormat &format = self.framebuffer->get_format();
+
+ uint64_t result = hash<64>(self.shprog);
+ result = hash_update<64>(result, self.vertex_setup->compute_hash());
+ result = hash_round<64>(result, self.primitive_type);
+
+ if(self.front_face!=NON_MANIFOLD && self.face_cull!=NO_CULL)
+ {
+ result = hash_round<64>(result, self.front_face);
+ result = hash_round<64>(result, self.face_cull);
+ }
+
+ result = hash_round<64>(result, format.get_samples());
+
+ if(const DepthTest *depth_test = self.depth_test)
+ if(depth_test->enabled)
+ {
+ result = hash_round<64>(result, depth_test->compare);
+ result = hash_update<64>(result, depth_test->write);
+ }
+
+ if(const StencilTest *stencil_test = self.stencil_test)
+ if(stencil_test->enabled)
+ {
+ result = hash_round<64>(result, stencil_test->compare);
+ result = hash_round<64>(result, stencil_test->stencil_fail_op);
+ result = hash_round<64>(result, stencil_test->depth_fail_op);
+ result = hash_round<64>(result, stencil_test->depth_pass_op);
+ result = hash_update<64>(result, stencil_test->reference);
+ }
+
+ if(const Blend *blend = self.blend)
+ if(blend->enabled)
+ {
+ result = hash_round<64>(result, blend->equation);
+ result = hash_round<64>(result, blend->src_factor);
+ result = hash_round<64>(result, blend->dst_factor);
+ result = hash_round<64>(result, blend->write_mask);
+ }
+
+ for(FrameAttachment a: format)
+ result = hash_update<64>(result, a);
+
+ return result;
+}
+
+void VulkanPipelineState::fill_creation_info(vector<char> &buffer) const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+
+ const FrameFormat &format = self.framebuffer->get_format();
+ VkRenderPass render_pass = device.get_pipeline_cache().get_render_pass(format, false, false);
+
+ unsigned n_color_attachments = 0;
+ for(FrameAttachment a: format)
+ {
+ unsigned attach_pt = get_attach_point(a);
+ if(attach_pt!=get_attach_point(DEPTH_ATTACHMENT) && attach_pt!=get_attach_point(STENCIL_ATTACHMENT))
+ ++n_color_attachments;
+ }
+
+ StructureBuilder sb(buffer, 10);
+ VkGraphicsPipelineCreateInfo *&pipeline_info = sb.add<VkGraphicsPipelineCreateInfo>();
+ VkPipelineInputAssemblyStateCreateInfo *&input_assembly_info = sb.add<VkPipelineInputAssemblyStateCreateInfo>();
+ VkPipelineViewportStateCreateInfo *&viewport_info = sb.add<VkPipelineViewportStateCreateInfo>();
+ VkPipelineRasterizationStateCreateInfo *&raster_info = sb.add<VkPipelineRasterizationStateCreateInfo>();
+ VkPipelineMultisampleStateCreateInfo *&multisample_info = sb.add<VkPipelineMultisampleStateCreateInfo>();
+ VkPipelineDepthStencilStateCreateInfo *&depth_stencil_info = sb.add<VkPipelineDepthStencilStateCreateInfo>();
+ VkPipelineColorBlendStateCreateInfo *&blend_info = sb.add<VkPipelineColorBlendStateCreateInfo>();
+ VkPipelineColorBlendAttachmentState *&blend_attachments = sb.add<VkPipelineColorBlendAttachmentState>(n_color_attachments);
+ VkPipelineDynamicStateCreateInfo *&dynamic_info = sb.add<VkPipelineDynamicStateCreateInfo>();
+ VkDynamicState *&dynamic_states = sb.add<VkDynamicState>(2);
+
+ input_assembly_info->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ input_assembly_info->topology = static_cast<VkPrimitiveTopology>(get_vulkan_primitive_type(self.primitive_type));
+	input_assembly_info->primitiveRestartEnable = VK_TRUE;
+
+ viewport_info->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_info->viewportCount = 1;
+ viewport_info->pViewports = 0;
+ viewport_info->scissorCount = 1;
+ viewport_info->pScissors = 0;
+
+ raster_info->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ raster_info->depthClampEnable = VK_FALSE;
+ raster_info->rasterizerDiscardEnable = VK_FALSE;
+ raster_info->polygonMode = VK_POLYGON_MODE_FILL;
+ if(self.face_cull==NO_CULL || self.front_face==NON_MANIFOLD)
+ {
+ raster_info->cullMode = VK_CULL_MODE_NONE;
+ raster_info->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ }
+ else
+ {
+ raster_info->cullMode = (self.face_cull==CULL_FRONT ? VK_CULL_MODE_FRONT_BIT : VK_CULL_MODE_BACK_BIT);
+ raster_info->frontFace = (self.front_face==CLOCKWISE ? VK_FRONT_FACE_CLOCKWISE : VK_FRONT_FACE_COUNTER_CLOCKWISE);
+ }
+ raster_info->depthBiasEnable = VK_FALSE;
+ raster_info->depthBiasConstantFactor = 0.0f;
+ raster_info->depthBiasClamp = 0.0f;
+ raster_info->depthBiasSlopeFactor = 0.0f;
+ raster_info->lineWidth = 1.0f;
+
+ multisample_info->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample_info->rasterizationSamples = static_cast<VkSampleCountFlagBits>(get_vulkan_samples(format.get_samples()));
+ multisample_info->sampleShadingEnable = VK_FALSE;
+ multisample_info->minSampleShading = 1.0f;
+ multisample_info->pSampleMask = 0;
+ multisample_info->alphaToCoverageEnable = VK_FALSE;
+ multisample_info->alphaToOneEnable = VK_FALSE;
+
+ depth_stencil_info->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ if(const DepthTest *depth_test = self.depth_test)
+ {
+ depth_stencil_info->depthTestEnable = depth_test->enabled;
+ depth_stencil_info->depthWriteEnable = depth_test->write;
+ depth_stencil_info->depthCompareOp = static_cast<VkCompareOp>(get_vulkan_predicate(depth_test->compare));
+ depth_stencil_info->depthBoundsTestEnable = VK_FALSE;
+ }
+ if(const StencilTest *stencil_test = self.stencil_test)
+ {
+ depth_stencil_info->stencilTestEnable = stencil_test->enabled;
+ depth_stencil_info->front.failOp = static_cast<VkStencilOp>(get_vulkan_stencil_op(stencil_test->stencil_fail_op));
+ depth_stencil_info->front.passOp = static_cast<VkStencilOp>(get_vulkan_stencil_op(stencil_test->depth_pass_op));
+ depth_stencil_info->front.depthFailOp = static_cast<VkStencilOp>(get_vulkan_stencil_op(stencil_test->depth_fail_op));
+ depth_stencil_info->front.compareOp = static_cast<VkCompareOp>(get_vulkan_predicate(stencil_test->compare));
+ depth_stencil_info->front.compareMask = 0xFFFFFFFFU;
+ depth_stencil_info->front.writeMask = 0xFFFFFFFFU;
+ depth_stencil_info->front.reference = stencil_test->reference;
+ depth_stencil_info->back = depth_stencil_info->front;
+ }
+
+ if(const Blend *blend = self.blend)
+ {
+ for(unsigned i=0; i<n_color_attachments; ++i)
+ {
+ blend_attachments[i].blendEnable = blend->enabled;
+ blend_attachments[i].srcColorBlendFactor = static_cast<VkBlendFactor>(get_vulkan_blend_factor(blend->src_factor));
+ blend_attachments[i].dstColorBlendFactor = static_cast<VkBlendFactor>(get_vulkan_blend_factor(blend->dst_factor));
+ blend_attachments[i].colorBlendOp = static_cast<VkBlendOp>(get_vulkan_blend_equation(blend->equation));
+ blend_attachments[i].srcAlphaBlendFactor = blend_attachments[i].srcColorBlendFactor;
+ blend_attachments[i].dstAlphaBlendFactor = blend_attachments[i].dstColorBlendFactor;
+ blend_attachments[i].alphaBlendOp = blend_attachments[i].colorBlendOp;
+ blend_attachments[i].colorWriteMask = get_vulkan_color_mask(blend->write_mask);
+ }
+ }
+
+ blend_info->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ blend_info->attachmentCount = n_color_attachments;
+ blend_info->pAttachments = blend_attachments;
+
+ dynamic_states[0] = VK_DYNAMIC_STATE_VIEWPORT;
+ dynamic_states[1] = VK_DYNAMIC_STATE_SCISSOR;
+
+ dynamic_info->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic_info->dynamicStateCount = 2;
+ dynamic_info->pDynamicStates = dynamic_states;
+
+ pipeline_info->sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+
+ pipeline_info->pInputAssemblyState = input_assembly_info;
+ pipeline_info->pTessellationState = 0;
+ pipeline_info->pViewportState = viewport_info;
+ pipeline_info->pRasterizationState = raster_info;
+ pipeline_info->pMultisampleState = multisample_info;
+ pipeline_info->pDepthStencilState = depth_stencil_info;
+ pipeline_info->pColorBlendState = blend_info;
+ pipeline_info->pDynamicState = dynamic_info;
+ pipeline_info->renderPass = handle_cast<::VkRenderPass>(render_pass);
+ pipeline_info->subpass = 0;
+
+ if(self.shprog)
+ {
+ pipeline_info->stageCount = self.shprog->n_stages;
+ pipeline_info->pStages = reinterpret_cast<const VkPipelineShaderStageCreateInfo *>(self.shprog->creation_info.data());
+ pipeline_info->layout = handle_cast<::VkPipelineLayout>(self.shprog->layout_handle);
+ }
+
+ if(self.vertex_setup)
+ {
+ self.vertex_setup->refresh();
+ pipeline_info->pVertexInputState = reinterpret_cast<const VkPipelineVertexInputStateCreateInfo *>(self.vertex_setup->creation_info.data());
+ }
+}
+
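+/* A binding value packs the descriptor set index into the bits above the low
+20, which hold the binding number within the set; only blocks belonging to the
+requested set contribute to the hash. */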
+uint64_t VulkanPipelineState::compute_descriptor_set_hash(unsigned index) const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+
+ uint64_t result = hash<64>(0, 0);
+ for(const PipelineState::BoundUniformBlock &b: self.uniform_blocks)
+ if(b.block && b.binding>=0 && static_cast<unsigned>(b.binding>>20)==index)
+ {
+ result = hash_update<64>(result, b.binding);
+ result = hash_update<64>(result, reinterpret_cast<uintptr_t>(b.block));
+ }
+
+ return result;
+}
+
+VkDescriptorSetLayout VulkanPipelineState::get_descriptor_set_layout(unsigned index) const
+{
+ return static_cast<const PipelineState *>(this)->shprog->desc_set_layout_handles[index];
+}
+
+unsigned VulkanPipelineState::fill_descriptor_writes(unsigned index, vector<char> &buffer) const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+
+ unsigned n_buffers = 0;
+ for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
+ if(u.block && u.binding>=0 && static_cast<unsigned>(u.binding>>20)==index)
+ ++n_buffers;
+
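+	// Pack the write and buffer info structures into caller-provided storage.
+	// dstSet is left unset here; the caller is expected to fill it in once the
+	// actual descriptor set handle is known.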
+ StructureBuilder sb(buffer, 2);
+ VkWriteDescriptorSet *&writes = sb.add<VkWriteDescriptorSet>(n_buffers);
+ VkDescriptorBufferInfo *&buffers = sb.add<VkDescriptorBufferInfo>(n_buffers);
+
+ VkWriteDescriptorSet *write_ptr = writes;
+ VkDescriptorBufferInfo *buffer_ptr = buffers;
+
+ for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
+ if(u.block && u.binding>=0 && static_cast<unsigned>(u.binding>>20)==index)
+ {
+ buffer_ptr->buffer = handle_cast<::VkBuffer>(u.block->get_buffer()->handle);
+ buffer_ptr->offset = u.block->get_offset();
+ buffer_ptr->range = u.block->get_data_size();
+
+ write_ptr->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write_ptr->dstBinding = u.binding&0xFFFFF;
+ write_ptr->descriptorCount = 1;
+ write_ptr->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ write_ptr->pBufferInfo = buffer_ptr;
+
+ ++buffer_ptr;
+ ++write_ptr;
+ }
+
+ return n_buffers;
+}
+
+void VulkanPipelineState::apply(VkCommandBuffer command_buffer) const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.CmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, handle);
+ if(const VertexSetup *vs = self.vertex_setup)
+ {
+ vk.CmdBindVertexBuffers(command_buffer, 0, vs->n_bindings, vs->buffers, vs->offsets);
+ VkIndexType index_type = static_cast<VkIndexType>(get_vulkan_index_type(vs->get_index_type()));
+ vk.CmdBindIndexBuffer(command_buffer, vs->get_index_buffer()->handle, 0, index_type);
+ }
+
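+	// A block bound to ReflectData::PUSH_CONSTANT is assumed to sort to the
+	// front of uniform_blocks, so only the first entry needs to be checked.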
+ if(!self.uniform_blocks.empty())
+ {
+ const PipelineState::BoundUniformBlock &first_block = self.uniform_blocks.front();
+ if(first_block.block && first_block.binding==ReflectData::PUSH_CONSTANT && first_block.changed)
+ {
+ const UniformBlock &pc_block = *first_block.block;
+ vk.CmdPushConstants(command_buffer, self.shprog->layout_handle, VK_SHADER_STAGE_ALL,
+ pc_block.get_offset(), pc_block.get_data_size(), pc_block.get_data_pointer());
+ }
+ }
+
+ vk.CmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.shprog->layout_handle, 0, descriptor_set_handles.size(), descriptor_set_handles.data(), 0, 0);
+
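+	// Vulkan's window space is y-down; flip the y coordinate so the y-up
+	// convention used elsewhere in the library keeps working.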
+ VkViewport viewport = { };
+ if(self.viewport)
+ {
+ viewport.x = self.viewport->left;
+ viewport.y = self.framebuffer->get_height()-(self.viewport->bottom+self.viewport->height);
+ viewport.width = self.viewport->width;
+ viewport.height = self.viewport->height;
+ }
+ else
+ {
+ viewport.x = 0;
+ viewport.y = 0;
+ viewport.width = self.framebuffer->get_width();
+ viewport.height = self.framebuffer->get_height();
+ }
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ vk.CmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor = { };
+ if(self.scissor)
+ {
+ scissor.offset.x = self.scissor->left;
+ scissor.offset.y = self.framebuffer->get_height()-(self.scissor->bottom+self.scissor->height);
+ scissor.extent.width = self.scissor->width;
+ scissor.extent.height = self.scissor->height;
+ }
+ else
+ {
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ scissor.extent.width = self.framebuffer->get_width();
+ scissor.extent.height = self.framebuffer->get_height();
+ }
+ vk.CmdSetScissor(command_buffer, 0, 1, &scissor);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_PIPELINESTATE_BACKEND_H_
+#define MSP_GL_PIPELINESTATE_BACKEND_H_
+
+#include <msp/core/noncopyable.h>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class VulkanPipelineState: public NonCopyable
+{
+ friend class PipelineCache;
+ friend class VulkanCommands;
+
+protected:
+ Device &device;
+ mutable unsigned changes = 0;
+	mutable VkPipeline handle = 0;
+ mutable std::vector<VkDescriptorSet> descriptor_set_handles;
+
+ VulkanPipelineState();
+ VulkanPipelineState(VulkanPipelineState &&);
+
+ void update() const;
+ void refresh() const { if(changes) update(); }
+ std::uint64_t compute_hash() const;
+ void fill_creation_info(std::vector<char> &) const;
+ std::uint64_t compute_descriptor_set_hash(unsigned) const;
+ VkDescriptorSetLayout get_descriptor_set_layout(unsigned) const;
+ unsigned fill_descriptor_writes(unsigned, std::vector<char> &) const;
+
+ void apply(VkCommandBuffer) const;
+};
+
+using PipelineStateBackend = VulkanPipelineState;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "pixelformat.h"
+#include "pixelformat_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace {
+
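+// One row of four VkComponentSwizzle values per ComponentSwizzle enumerator;
+// the rows are assumed to follow the enumeration order (identity first, with
+// the luminance and RGB reorderings after it).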
+static unsigned swizzle_orders[] =
+{
+ VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A,
+ VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_ONE,
+ VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_A,
+ VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_ONE
+};
+
+}
+
+namespace Msp {
+namespace GL {
+
+ComponentSwizzle get_required_swizzle(PixelComponents comp)
+{
+ switch(comp)
+ {
+ case RGB: return RGBA_TO_RGB;
+ case LUMINANCE: return R_TO_LUMINANCE;
+ case LUMINANCE_ALPHA: return RG_TO_LUMINANCE_ALPHA;
+ default: return NO_SWIZZLE;
+ }
+}
+
+void require_pixelformat(PixelFormat)
+{ }
+
+unsigned get_vulkan_pixelformat(PixelFormat pf)
+{
+ switch(pf)
+ {
+ case R8: return VK_FORMAT_R8_UNORM;
+ case R16F: return VK_FORMAT_R16_SFLOAT;
+ case R32F: return VK_FORMAT_R32_SFLOAT;
+ case RG8: return VK_FORMAT_R8G8_UNORM;
+ case RG16F: return VK_FORMAT_R16G16_SFLOAT;
+ case RG32F: return VK_FORMAT_R32G32_SFLOAT;
+ case RGB8: return VK_FORMAT_R8G8B8_UNORM;
+ case RGB16F: return VK_FORMAT_R16G16B16_SFLOAT;
+ case RGB32F: return VK_FORMAT_R32G32B32_SFLOAT;
+ case RGBA8: return VK_FORMAT_R8G8B8A8_UNORM;
+ case RGBA16F: return VK_FORMAT_R16G16B16A16_SFLOAT;
+ case RGBA32F: return VK_FORMAT_R32G32B32A32_SFLOAT;
+ case SRGB8: return VK_FORMAT_R8G8B8_SRGB;
+ case SRGB8_ALPHA8: return VK_FORMAT_R8G8B8A8_SRGB;
+ case BGR8: return VK_FORMAT_B8G8R8_UNORM;
+ case BGRA8: return VK_FORMAT_B8G8R8A8_UNORM;
+ case SBGR8: return VK_FORMAT_B8G8R8_SRGB;
+ case SBGR8_ALPHA8: return VK_FORMAT_B8G8R8A8_SRGB;
+ case DEPTH_COMPONENT16: return VK_FORMAT_D16_UNORM;
+ case DEPTH_COMPONENT24: return VK_FORMAT_X8_D24_UNORM_PACK32;
+ case DEPTH_COMPONENT32F: return VK_FORMAT_D32_SFLOAT;
+ case STENCIL_INDEX8: return VK_FORMAT_S8_UINT;
+ default: throw invalid_argument("get_vulkan_pixelformat");
+ }
+}
+
+unsigned get_vulkan_aspect(PixelComponents comp)
+{
+ switch(comp)
+ {
+ case DEPTH_COMPONENT: return VK_IMAGE_ASPECT_DEPTH_BIT;
+ case STENCIL_INDEX: return VK_IMAGE_ASPECT_STENCIL_BIT;
+ default: return VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+}
+
+PixelFormat pixelformat_from_vulkan(unsigned vkf)
+{
+ switch(vkf)
+ {
+ case VK_FORMAT_R8_UNORM: return R8;
+ case VK_FORMAT_R16_SFLOAT: return R16F;
+ case VK_FORMAT_R32_SFLOAT: return R32F;
+ case VK_FORMAT_R8G8_UNORM: return RG8;
+ case VK_FORMAT_R16G16_SFLOAT: return RG16F;
+ case VK_FORMAT_R32G32_SFLOAT: return RG32F;
+ case VK_FORMAT_R8G8B8_UNORM: return RGB8;
+ case VK_FORMAT_R16G16B16_SFLOAT: return RGB16F;
+ case VK_FORMAT_R32G32B32_SFLOAT: return RGB32F;
+ case VK_FORMAT_R8G8B8A8_UNORM: return RGBA8;
+ case VK_FORMAT_R16G16B16A16_SFLOAT: return RGBA16F;
+ case VK_FORMAT_R32G32B32A32_SFLOAT: return RGBA32F;
+ case VK_FORMAT_R8G8B8_SRGB: return SRGB8;
+ case VK_FORMAT_R8G8B8A8_SRGB: return SRGB8_ALPHA8;
+ case VK_FORMAT_B8G8R8_UNORM: return BGR8;
+ case VK_FORMAT_B8G8R8A8_UNORM: return BGRA8;
+ case VK_FORMAT_B8G8R8_SRGB: return SBGR8;
+ case VK_FORMAT_B8G8R8A8_SRGB: return SBGR8_ALPHA8;
+ case VK_FORMAT_D16_UNORM: return DEPTH_COMPONENT16;
+ case VK_FORMAT_X8_D24_UNORM_PACK32: return DEPTH_COMPONENT24;
+ case VK_FORMAT_D32_SFLOAT: return DEPTH_COMPONENT32F;
+ case VK_FORMAT_S8_UINT: return STENCIL_INDEX8;
+ default: throw invalid_argument("pixelformat_from_vulkan");
+ }
+}
+
+const unsigned *get_vulkan_swizzle(ComponentSwizzle swiz)
+{
+ return swizzle_orders+4*swiz;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_PIXELFORMAT_BACKEND_H_
+#define MSP_GL_PIXELFORMAT_BACKEND_H_
+
+#ifndef MSP_GL_PIXELFORMAT_H_
+#error "pixelformat_backend.h requires pixelformat.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_pixelformat(PixelFormat);
+unsigned get_vulkan_aspect(PixelComponents);
+PixelFormat pixelformat_from_vulkan(unsigned);
+const unsigned *get_vulkan_swizzle(ComponentSwizzle);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "predicate.h"
+#include "predicate_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_predicate(Predicate pred)
+{
+ switch(pred)
+ {
+ case NEVER: return VK_COMPARE_OP_NEVER;
+ case ALWAYS: return VK_COMPARE_OP_ALWAYS;
+ case LESS: return VK_COMPARE_OP_LESS;
+ case LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case EQUAL: return VK_COMPARE_OP_EQUAL;
+ case GREATER: return VK_COMPARE_OP_GREATER;
+ case GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
+ default: throw invalid_argument("get_vulkan_predicate");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_PREDICATE_BACKEND_H_
+#define MSP_GL_PREDICATE_BACKEND_H_
+
+#ifndef MSP_GL_PREDICATE_H_
+#error "predicate_backend.h requires predicate.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_predicate(Predicate);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "primitivetype.h"
+#include "primitivetype_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_primitive_type(PrimitiveType pt)
+{
+ switch(pt)
+ {
+ case POINTS: return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+ case LINES: return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+ case LINE_STRIP: return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+ case TRIANGLES: return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+ case TRIANGLE_STRIP: return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+ case TRIANGLE_FAN: return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
+ default: throw invalid_argument("get_vulkan_primitive_type");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_PRIMITIVETYPE_BACKEND_H_
+#define MSP_GL_PRIMITIVETYPE_BACKEND_H_
+
+#ifndef MSP_GL_PRIMITIVETYPE_H_
+#error "primitivetype_backend.h requires primitivetype.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_primitive_type(PrimitiveType);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <cstring>
+#include <msp/core/algorithm.h>
+#include "device.h"
+#include "error.h"
+#include "program.h"
+#include "program_backend.h"
+#include "structurebuilder.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanProgram::VulkanProgram():
+ device(Device::get_current())
+{ }
+
+VulkanProgram::VulkanProgram(VulkanProgram &&other):
+ device(other.device),
+ n_stages(other.n_stages),
+ creation_info(move(other.creation_info)),
+ desc_set_layout_handles(move(other.desc_set_layout_handles)),
+ layout_handle(other.layout_handle)
+{
+ other.desc_set_layout_handles.clear();
+ other.layout_handle = 0;
+}
+
+VulkanProgram::~VulkanProgram()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(layout_handle)
+ vk.DestroyPipelineLayout(layout_handle);
+ for(VkDescriptorSetLayout d: desc_set_layout_handles)
+ vk.DestroyDescriptorSetLayout(d);
+}
+
+bool VulkanProgram::has_stages() const
+{
+ return n_stages;
+}
+
+void VulkanProgram::add_glsl_stages(const GlslModule &, const map<string, int> &, TransientData &)
+{
+ throw invalid_operation("VulkanProgram::add_glsl_stages");
+}
+
+void VulkanProgram::add_spirv_stages(const SpirVModule &mod, const map<string, int> &spec_values)
+{
+ const vector<SpirVModule::EntryPoint> &entry_points = mod.get_entry_points();
+
+ n_stages = entry_points.size();
+ size_t entry_names_size = 0;
+ for(const SpirVModule::EntryPoint &e: entry_points)
+ entry_names_size += e.name.size()+1;
+
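+	// Build all stage descriptions, entry point names and specialization data
+	// in a single contiguous block, so the internal pointers stay valid when
+	// creation_info is moved along with the program.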
+ StructureBuilder sb(creation_info, 5);
+ VkPipelineShaderStageCreateInfo *&stage_infos = sb.add<VkPipelineShaderStageCreateInfo>(n_stages);
+ char *&name_table = sb.add<char>(entry_names_size);
+ VkSpecializationInfo *&spec_info = sb.add<VkSpecializationInfo>();
+ VkSpecializationMapEntry *&spec_map = sb.add<VkSpecializationMapEntry>(spec_values.size());
+ int *&spec_data = sb.add<int>(spec_values.size());
+
+ unsigned i = 0;
+ for(const SpirVModule::Constant &c: mod.get_spec_constants())
+ {
+ auto j = spec_values.find(c.name);
+ if(j!=spec_values.end())
+ {
+ spec_map[i].constantID = c.constant_id;
+ spec_map[i].offset = i*sizeof(int);
+ spec_map[i].size = sizeof(int);
+ spec_data[i] = j->second;
+ ++i;
+ }
+ }
+
+ spec_info->mapEntryCount = i;
+ spec_info->pMapEntries = spec_map;
+ spec_info->dataSize = spec_values.size()*sizeof(int);
+ spec_info->pData = spec_data;
+
+ char *name_ptr = name_table;
+ i = 0;
+ for(const SpirVModule::EntryPoint &e: entry_points)
+ {
+ stage_infos[i].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stage_infos[i].stage = static_cast<VkShaderStageFlagBits>(get_vulkan_stage(e.stage));
+ stage_infos[i].module = handle_cast<::VkShaderModule>(mod.handle);
+ strcpy(name_ptr, e.name.c_str());
+ stage_infos[i].pName = name_ptr;
+ name_ptr += e.name.size()+1;
+ stage_infos[i].pSpecializationInfo = spec_info;
+ ++i;
+ }
+}
+
+void VulkanProgram::finalize_uniforms()
+{
+ const VulkanFunctions &vk = device.get_functions();
+ const ReflectData &rd = static_cast<const Program *>(this)->reflect_data;
+
+ auto i = find_member(rd.uniform_blocks, static_cast<int>(ReflectData::PUSH_CONSTANT), &ReflectData::UniformBlockInfo::bind_point);
+ const ReflectData::UniformBlockInfo *push_const_block = (i!=rd.uniform_blocks.end() ? &*i : 0);
+
+ desc_set_layout_handles.resize(rd.n_descriptor_sets);
+ for(unsigned j=0; j<rd.n_descriptor_sets; ++j)
+ {
+		vector<VkDescriptorSetLayoutBinding> bindings;
+ for(const ReflectData::UniformBlockInfo &b: rd.uniform_blocks)
+ if(b.bind_point>=0 && static_cast<unsigned>(b.bind_point>>20)==j)
+ {
+ bindings.emplace_back();
+ VkDescriptorSetLayoutBinding &binding = bindings.back();
+				binding.binding = b.bind_point&0xFFFFF;  // low 20 bits hold the binding number; high bits hold the set index
+ binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ binding.descriptorCount = 1;
+ binding.stageFlags = VK_SHADER_STAGE_ALL;
+ binding.pImmutableSamplers = 0;
+ }
+
+ for(const ReflectData::UniformInfo &u: rd.uniforms)
+ if(u.binding>=0 && static_cast<unsigned>(u.binding>>20)==j && is_image(u.type))
+ {
+ bindings.emplace_back();
+ VkDescriptorSetLayoutBinding &binding = bindings.back();
+				binding.binding = u.binding&0xFFFFF;
+ binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ binding.descriptorCount = 1;
+ binding.stageFlags = VK_SHADER_STAGE_ALL;
+ binding.pImmutableSamplers = 0;
+ }
+
+ VkDescriptorSetLayoutCreateInfo set_layout_info = { };
+ set_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ set_layout_info.bindingCount = bindings.size();
+ set_layout_info.pBindings = bindings.data();
+
+ vk.CreateDescriptorSetLayout(set_layout_info, desc_set_layout_handles[j]);
+ }
+
+ VkPushConstantRange push_const_range = { };
+ push_const_range.stageFlags = VK_SHADER_STAGE_ALL;
+ push_const_range.offset = 0;
+ push_const_range.size = (push_const_block ? push_const_block->data_size : 0);
+
+ VkPipelineLayoutCreateInfo layout_info = { };
+ layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ layout_info.setLayoutCount = rd.n_descriptor_sets;
+ layout_info.pSetLayouts = handle_cast<::VkDescriptorSetLayout *>(desc_set_layout_handles.data());
+ layout_info.pushConstantRangeCount = (push_const_block ? 1 : 0);
+ layout_info.pPushConstantRanges = &push_const_range;
+
+ vk.CreatePipelineLayout(layout_info, layout_handle);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_PROGRAM_BACKEND_H_
+#define MSP_GL_PROGRAM_BACKEND_H_
+
+#include <map>
+#include <string>
+#include <vector>
+#include <msp/core/noncopyable.h>
+#include "reflectdata.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class VulkanProgram: public NonCopyable
+{
+ friend class VulkanPipelineState;
+
+protected:
+ struct TransientData
+ { };
+
+ Device &device;
+ unsigned n_stages = 0;
+ std::vector<char> creation_info;
+ std::vector<VkDescriptorSetLayout> desc_set_layout_handles;
+ VkPipelineLayout layout_handle = 0;
+
+ VulkanProgram();
+ VulkanProgram(VulkanProgram &&);
+ ~VulkanProgram();
+
+ bool has_stages() const;
+ void add_glsl_stages(const GlslModule &, const std::map<std::string, int> &, TransientData &);
+ void add_spirv_stages(const SpirVModule &, const std::map<std::string, int> &);
+
+ void finalize(const Module &, TransientData &) { }
+ void finalize_uniforms();
+
+ void set_debug_name(const std::string &) { }
+};
+
+using ProgramBackend = VulkanProgram;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <stdexcept>
+#include "query.h"
+#include "query_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanQueryPool::VulkanQueryPool(unsigned t):
+ vulkan_type(get_vulkan_query_type(t))
+{
+ throw logic_error("VulkanQueryPool is unimplemented");
+}
+
+VulkanQueryPool::VulkanQueryPool(VulkanQueryPool &&other):
+ vulkan_type(other.vulkan_type)
+{ }
+
+VulkanQueryPool::~VulkanQueryPool()
+{ }
+
+void VulkanQueryPool::resize()
+{ }
+
+unsigned VulkanQueryPool::get_result(unsigned) const
+{
+ return 0;
+}
+
+
+unsigned get_vulkan_query_type(unsigned t)
+{
+ switch(t)
+ {
+ case OCCLUSION_QUERY: return VK_QUERY_TYPE_OCCLUSION;
+ default: throw invalid_argument("get_vulkan_query_type");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_QUERY_BACKEND_H_
+#define MSP_GL_QUERY_BACKEND_H_
+
+#include <msp/core/noncopyable.h>
+#include <vector>
+
+namespace Msp {
+namespace GL {
+
+class VulkanQueryPool: public NonCopyable
+{
+ friend class VulkanCommands;
+
+protected:
+ unsigned vulkan_type;
+
+ VulkanQueryPool(unsigned);
+ VulkanQueryPool(VulkanQueryPool &&);
+ ~VulkanQueryPool();
+
+ void resize();
+
+ unsigned get_result(unsigned) const;
+};
+
+using QueryPoolBackend = VulkanQueryPool;
+
+unsigned get_vulkan_query_type(unsigned);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "renderer.h"
+#include "renderer_backend.h"
+
+namespace Msp {
+namespace GL {
+
+void VulkanRenderer::begin(Semaphore &sem)
+{
+ static_cast<Renderer *>(this)->begin();
+
+ begin_semaphore = &sem;
+}
+
+void VulkanRenderer::end()
+{
+ static_cast<Renderer *>(this)->commands.use_pipeline(0);
+}
+
+void VulkanRenderer::end(Semaphore &sem)
+{
+ Renderer &self = *static_cast<Renderer *>(this);
+ self.end();
+ self.commands.submit_frame(begin_semaphore, &sem);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_RENDERER_BACKEND_H_
+#define MSP_GL_RENDERER_BACKEND_H_
+
+#include <msp/core/noncopyable.h>
+#include "commands.h"
+#include "pipelinestate.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanRenderer: public NonCopyable
+{
+protected:
+ PipelineState pipeline_state;
+	Semaphore *begin_semaphore = 0;
+
+ void begin() { }
+ void begin(Semaphore &);
+
+ void end();
+ void end(Semaphore &);
+};
+
+using RendererBackend = VulkanRenderer;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "sampler.h"
+#include "sampler_backend.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanSampler::VulkanSampler()
+{
+ throw logic_error("VulkanSampler is unimplemented");
+}
+
+VulkanSampler::VulkanSampler(VulkanSampler &&other):
+ handle(other.handle)
+{
+ other.handle = 0;
+}
+
+VulkanSampler::~VulkanSampler()
+{ }
+
+void VulkanSampler::update(unsigned) const
+{ }
+
+void VulkanSampler::set_debug_name(const string &)
+{ }
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_SAMPLER_BACKEND_H_
+#define MSP_GL_SAMPLER_BACKEND_H_
+
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanSampler
+{
+ friend class VulkanPipelineState;
+
+protected:
+	VkSampler handle = 0;
+
+ VulkanSampler();
+ VulkanSampler(VulkanSampler &&);
+ ~VulkanSampler();
+
+ static bool check_anisotropic(bool) { return true; }
+
+ void update(unsigned) const;
+
+ void set_debug_name(const std::string &);
+};
+
+using SamplerBackend = VulkanSampler;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "device.h"
+#include "semaphore.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+Semaphore::Semaphore():
+ device(Device::get_current())
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkSemaphoreCreateInfo semaphore_info = { };
+ semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+
+ vk.CreateSemaphore(semaphore_info, handle);
+}
+
+Semaphore::~Semaphore()
+{
+ device.get_destroy_queue().destroy(handle);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_SEMAPHORE_H_
+#define MSP_GL_VULKAN_SEMAPHORE_H_
+
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class Semaphore
+{
+ friend class SwapChain;
+ friend class VulkanCommands;
+
+private:
+ Device &device;
+ VkSemaphore handle;
+
+public:
+ Semaphore();
+ ~Semaphore();
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "stenciltest.h"
+#include "stenciltest_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_stencil_op(StencilOp op)
+{
+ switch(op)
+ {
+ case KEEP: return VK_STENCIL_OP_KEEP;
+ case SET_ZERO: return VK_STENCIL_OP_ZERO;
+ case REPLACE: return VK_STENCIL_OP_REPLACE;
+ case INCR_CLAMP: return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+ case DECR_CLAMP: return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+ case INVERT: return VK_STENCIL_OP_INVERT;
+ case INCR_WRAP: return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+ case DECR_WRAP: return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+ default: throw invalid_argument("get_vulkan_stencil_op");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_STENCILTEST_BACKEND_H_
+#define MSP_GL_STENCILTEST_BACKEND_H_
+
+#ifndef MSP_GL_STENCILTEST_H_
+#error "stenciltest_backend.h requires stenciltest.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_stencil_op(StencilOp);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "error.h"
+#include "structurebuilder.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+StructureBuilder::StructureBuilder(vector<char> &s, unsigned max_parts):
+ storage(s)
+{
+ storage.clear();
+ parts.reserve(max_parts);
+}
+
+void *&StructureBuilder::add(std::size_t size, std::size_t align)
+{
+ if(parts.size()>=parts.capacity())
+ throw invalid_operation("StructureBuilder::add");
+
+ parts.emplace_back();
+ Part &part = parts.back();
+ part.offset = storage.size()+align-1;
+ part.offset -= part.offset%align;
+
+ storage.resize(part.offset+size);
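+	// resize() may reallocate the backing storage, so refresh every part's pointer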
+ for(Part &p: parts)
+ p.pointer = storage.data()+p.offset;
+
+ return parts.back().pointer;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_STRUCTUREBUILDER_H_
+#define MSP_GL_VULKAN_STRUCTUREBUILDER_H_
+
+#include <cstdlib>
+#include <vector>
+
+namespace Msp {
+namespace GL {
+
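+/* Builds several arrays in a single contiguous allocation.  A minimal usage
+sketch (names hypothetical):
+
+    StructureBuilder sb(storage, 2);
+    Header *&header = sb.add<Header>();
+    Item *&items = sb.add<Item>(item_count);
+
+The returned references remain usable across later add() calls because each
+add() re-derives every part's pointer from the storage. */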
+class StructureBuilder
+{
+private:
+ struct Part
+ {
+ std::size_t offset = 0;
+ void *pointer = 0;
+ };
+
+ std::vector<char> &storage;
+ std::vector<Part> parts;
+
+public:
+ StructureBuilder(std::vector<char> &, unsigned);
+
+ template<typename T>
+ T *&add(std::size_t count = 1)
+ { return reinterpret_cast<T *&>(add(sizeof(T)*count, alignof(T))); }
+
+private:
+ void *&add(std::size_t, std::size_t);
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <vector>
+#include <msp/core/algorithm.h>
+#include <msp/graphics/vulkancontext_platform.h>
+#include "device.h"
+#include "error.h"
+#include "pixelformat.h"
+#include "semaphore.h"
+#include "swapchain.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+SwapChain::SwapChain(unsigned w, unsigned h, unsigned n_images_min):
+ device(Device::get_current()),
+ surface(handle_cast<VkSurface>(device.get_context().get_private().surface)),
+ width(w),
+ height(h)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkSwapchainCreateInfoKHR swapchain_info = { };
+ swapchain_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ swapchain_info.surface = handle_cast<::VkSurfaceKHR>(surface);
+ swapchain_info.minImageCount = n_images_min;
+ swapchain_info.imageExtent.width = width;
+ swapchain_info.imageExtent.height = height;
+ swapchain_info.imageArrayLayers = 1;
+ swapchain_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ swapchain_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ swapchain_info.clipped = VK_TRUE;
+
+ VkSurfaceCapabilitiesKHR surface_caps;
+ vk.GetPhysicalDeviceSurfaceCapabilities(surface, surface_caps);
+ swapchain_info.preTransform = surface_caps.currentTransform;
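+	// Pick the lowest set bit, i.e. the first supported composite alpha mode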
+ swapchain_info.compositeAlpha = static_cast<VkCompositeAlphaFlagBitsKHR>(
+ surface_caps.supportedCompositeAlpha&~(surface_caps.supportedCompositeAlpha-1));
+
+ uint32_t n_formats = 0;
+ vk.GetPhysicalDeviceSurfaceFormats(surface, n_formats, 0);
+ vector<VkSurfaceFormatKHR> surface_formats(n_formats);
+ vk.GetPhysicalDeviceSurfaceFormats(surface, n_formats, surface_formats.data());
+
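+	// Pick the first surface format that is not sRGB-encoded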
+ PixelFormat image_fmt = NO_PIXELFORMAT;
+ for(const VkSurfaceFormatKHR &f: surface_formats)
+ {
+ image_fmt = pixelformat_from_vulkan(f.format);
+ if(!is_srgb(image_fmt))
+ {
+ swapchain_info.imageFormat = f.format;
+ swapchain_info.imageColorSpace = f.colorSpace;
+ break;
+ }
+ }
+
+	if(swapchain_info.imageFormat==VK_FORMAT_UNDEFINED)
+ throw runtime_error("no suitable swapchain pixelformat");
+
+ uint32_t n_present_modes = 0;
+ vk.GetPhysicalDeviceSurfacePresentModes(surface, n_present_modes, 0);
+ vector<VkPresentModeKHR> present_modes(n_present_modes);
+ vk.GetPhysicalDeviceSurfacePresentModes(surface, n_present_modes, present_modes.data());
+
+ if(find(present_modes, VK_PRESENT_MODE_FIFO_KHR)!=present_modes.end())
+ swapchain_info.presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ else
+ swapchain_info.presentMode = present_modes.front();
+
+ vk.CreateSwapchain(swapchain_info, handle);
+
+ uint32_t n_images = 0;
+ vk.GetSwapchainImages(handle, n_images, 0);
+ vector<VkImage> image_handles(n_images);
+ vk.GetSwapchainImages(handle, n_images, image_handles.data());
+
+ images.reserve(n_images);
+ for(unsigned i=0; i<n_images; ++i)
+		images.push_back(SwapChainTexture(image_fmt, width, height, image_handles[i]));
+}
+
+SwapChain::~SwapChain()
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ vk.DestroySwapchain(handle);
+}
+
+unsigned SwapChain::begin_frame(Semaphore &sem)
+{
+ if(current_index>=0)
+ throw invalid_operation("SwapChain::begin_frame");
+
+ const VulkanFunctions &vk = device.get_functions();
+
+ uint32_t image_index;
+ vk.AcquireNextImage(handle, numeric_limits<uint64_t>::max(), sem.handle, 0, image_index);
+
+ current_index = image_index;
+
+ return image_index;
+}
+
+void SwapChain::present_frame(Semaphore &sem)
+{
+ if(current_index<0)
+ throw invalid_operation("SwapChain::present_frame");
+
+ const VulkanFunctions &vk = device.get_functions();
+ ::VkSwapchainKHR vk_handle = handle_cast<::VkSwapchainKHR>(handle);
+ ::VkSemaphore vk_sem = handle_cast<::VkSemaphore>(sem.handle);
+ uint32_t image_index = current_index;
+
+ current_index = -1;
+
+ VkPresentInfoKHR present_info = { };
+ present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ present_info.waitSemaphoreCount = 1;
+ present_info.pWaitSemaphores = &vk_sem;
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = &vk_handle;
+ present_info.pImageIndices = &image_index;
+ vk.QueuePresent(present_info);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_SWAPCHAIN_H_
+#define MSP_GL_VULKAN_SWAPCHAIN_H_
+
+#include "handles.h"
+#include "swapchaintexture.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+class Semaphore;
+
+class SwapChain
+{
+ friend class VulkanCommands;
+
+private:
+ Device &device;
+ VkSwapchain handle = 0;
+ VkSurface surface = 0;
+ unsigned width = 0;
+ unsigned height = 0;
+ std::vector<SwapChainTexture> images;
+ int current_index = -1;
+
+public:
+ SwapChain(unsigned, unsigned, unsigned);
+ ~SwapChain();
+
+ unsigned get_n_images() const { return images.size(); }
+ SwapChainTexture &get_image(unsigned i) { return images[i]; }
+
+ unsigned begin_frame(Semaphore &);
+ void present_frame(Semaphore &);
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "swapchaintexture.h"
+
+namespace Msp {
+namespace GL {
+
+SwapChainTexture::SwapChainTexture(PixelFormat f, unsigned w, unsigned h, VkImage i)
+{
+ handle = i;
+ storage(f, w, h, 1);
+}
+
+SwapChainTexture::~SwapChainTexture()
+{
+ handle = 0;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_SWAPCHAINTEXTURE_H_
+#define MSP_GL_VULKAN_SWAPCHAINTEXTURE_H_
+
+#include "texture2d.h"
+
+namespace Msp {
+namespace GL {
+
+class SwapChainTexture: public Texture2D
+{
+ friend class SwapChain;
+
+private:
+ SwapChainTexture(PixelFormat, unsigned, unsigned, VkImage);
+public:
+ SwapChainTexture(SwapChainTexture &&) = default;
+ ~SwapChainTexture();
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "texture1d_backend.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanTexture1D::VulkanTexture1D():
+ Texture(VK_IMAGE_VIEW_TYPE_1D)
+{
+ throw std::logic_error("VulkanTexture1D is unimplemented");
+}
+
+void VulkanTexture1D::sub_image(unsigned, int, unsigned, const void *)
+{ }
+
+size_t VulkanTexture1D::get_data_size() const
+{
+ return 0;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURE1D_BACKEND_H_
+#define MSP_GL_TEXTURE1D_BACKEND_H_
+
+#include "texture.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanTexture1D: public Texture
+{
+protected:
+ VulkanTexture1D();
+
+ void sub_image(unsigned, int, unsigned, const void *);
+
+public:
+ virtual AsyncLoader *load(IO::Seekable &, const Resources * = 0) { return 0; }
+ virtual std::size_t get_data_size() const;
+ virtual void unload() { }
+};
+
+using Texture1DBackend = VulkanTexture1D;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "texture2d_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanTexture2D::VulkanTexture2D():
+ Texture(VK_IMAGE_VIEW_TYPE_2D)
+{ }
+
+void VulkanTexture2D::sub_image(unsigned, int, int, unsigned, unsigned, const void *)
+{
+ throw logic_error("Texture2D::sub_image is unimplemented");
+}
+
+Resource::AsyncLoader *VulkanTexture2D::load(IO::Seekable &, const Resources *)
+{
+ throw logic_error("Texture2D::load is unimplemented");
+}
+
+uint64_t VulkanTexture2D::get_data_size() const
+{
+ return 0;
+}
+
+void VulkanTexture2D::unload()
+{
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURE2D_BACKEND_H_
+#define MSP_GL_TEXTURE2D_BACKEND_H_
+
+#include "texture.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanTexture2D: public Texture
+{
+protected:
+ VulkanTexture2D();
+
+ void sub_image(unsigned, int, int, unsigned, unsigned, const void *);
+
+public:
+ virtual Resource::AsyncLoader *load(IO::Seekable &, const Resources * = 0);
+ virtual std::size_t get_data_size() const;
+ virtual void unload();
+};
+
+using Texture2DBackend = VulkanTexture2D;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "texture2darray_backend.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanTexture2DArray::VulkanTexture2DArray():
+ Texture3D(VK_IMAGE_VIEW_TYPE_2D_ARRAY)
+{ }
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURE2DARRAY_BACKEND_H_
+#define MSP_GL_TEXTURE2DARRAY_BACKEND_H_
+
+#include "texture3d.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanTexture2DArray: public Texture3D
+{
+protected:
+ VulkanTexture2DArray();
+};
+
+using Texture2DArrayBackend = VulkanTexture2DArray;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "texture2dmultisample_backend.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanTexture2DMultisample::VulkanTexture2DMultisample():
+	Texture(VK_IMAGE_VIEW_TYPE_2D)
+{
+ throw std::logic_error("VulkanTexture2DMultisample is unimplemented");
+}
+
+size_t VulkanTexture2DMultisample::get_data_size() const
+{
+ return 0;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURE2DMULTISAMPLE_BACKEND_H_
+#define MSP_GL_TEXTURE2DMULTISAMPLE_BACKEND_H_
+
+#include "texture.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanTexture2DMultisample: public Texture
+{
+protected:
+ VulkanTexture2DMultisample();
+
+public:
+ virtual AsyncLoader *load(IO::Seekable &, const Resources * = 0) { return 0; }
+ virtual std::size_t get_data_size() const;
+ virtual void unload() { }
+};
+
+using Texture2DMultisampleBackend = VulkanTexture2DMultisample;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "texture3d_backend.h"
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanTexture3D::VulkanTexture3D():
+ Texture(VK_IMAGE_VIEW_TYPE_3D)
+{
+ throw std::logic_error("VulkanTexture3D is unimplemented");
+}
+
+VulkanTexture3D::VulkanTexture3D(unsigned t):
+ Texture(t)
+{ }
+
+void VulkanTexture3D::sub_image(unsigned, int, int, int, unsigned, unsigned, unsigned, const void *)
+{
+}
+
+bool VulkanTexture3D::is_array() const
+{
+ return view_type==VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+}
+
+size_t VulkanTexture3D::get_data_size() const
+{
+ return 0;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURE3D_BACKEND_H_
+#define MSP_GL_TEXTURE3D_BACKEND_H_
+
+#include "texture.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanTexture3D: public Texture
+{
+protected:
+ VulkanTexture3D();
+ VulkanTexture3D(unsigned);
+
+ void sub_image(unsigned, int, int, int, unsigned, unsigned, unsigned, const void *);
+
+ bool is_array() const;
+
+public:
+ virtual AsyncLoader *load(IO::Seekable &, const Resources * = 0) { return 0; }
+ virtual std::size_t get_data_size() const;
+ virtual void unload() { }
+};
+
+using Texture3DBackend = VulkanTexture3D;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "device.h"
+#include "error.h"
+#include "texture.h"
+#include "texture_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanTexture::VulkanTexture(unsigned t):
+ device(Device::get_current()),
+ view_type(t)
+{ }
+
+VulkanTexture::VulkanTexture(VulkanTexture &&other):
+ device(other.device),
+ handle(other.handle),
+ view_handle(other.view_handle),
+ view_type(other.view_type),
+ debug_name(move(other.debug_name))
+{
+ other.handle = 0;
+ other.view_handle = 0;
+}
+
+VulkanTexture::~VulkanTexture()
+{
+ if(view_handle)
+ device.get_destroy_queue().destroy(view_handle);
+}
+
+void VulkanTexture::allocate()
+{
+ const Texture &self = *static_cast<const Texture *>(this);
+ const VulkanFunctions &vk = device.get_functions();
+
+ if(!handle)
+ throw logic_error("Texture image allocation is unimplemented");
+
+ VkImageViewCreateInfo view_info = { };
+ view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ view_info.image = handle_cast<::VkImage>(handle);
+ view_info.viewType = static_cast<VkImageViewType>(view_type);
+ view_info.format = static_cast<VkFormat>(get_vulkan_pixelformat(self.storage_fmt));
+
+ const unsigned *swizzle_order = get_vulkan_swizzle(self.swizzle);
+ view_info.components.r = static_cast<VkComponentSwizzle>(swizzle_order[0]);
+ view_info.components.g = static_cast<VkComponentSwizzle>(swizzle_order[1]);
+ view_info.components.b = static_cast<VkComponentSwizzle>(swizzle_order[2]);
+ view_info.components.a = static_cast<VkComponentSwizzle>(swizzle_order[3]);
+
+ view_info.subresourceRange.aspectMask = get_vulkan_aspect(get_components(self.storage_fmt));
+ view_info.subresourceRange.baseMipLevel = 0;
+ view_info.subresourceRange.levelCount = 1;
+ view_info.subresourceRange.baseArrayLayer = 0;
+ view_info.subresourceRange.layerCount = 1;
+
+ vk.CreateImageView(view_info, view_handle);
+
+ if(!debug_name.empty())
+ set_vulkan_object_names();
+}
+
+void VulkanTexture::generate_mipmap()
+{
+ throw logic_error("VulkanTexture::generate_mipmap is unimplemented");
+}
+
+void VulkanTexture::set_debug_name(const string &name)
+{
+#ifdef DEBUG
+ debug_name = name;
+ if(handle)
+ set_vulkan_object_names();
+#else
+ (void)name;
+#endif
+}
+
+void VulkanTexture::set_vulkan_object_names() const
+{
+#ifdef DEBUG
+ const VulkanFunctions &vk = device.get_functions();
+
+ string view_name = debug_name+"/view";
+ VkDebugUtilsObjectNameInfoEXT name_info = { };
+ name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+ name_info.objectType = VK_OBJECT_TYPE_IMAGE_VIEW;
+ name_info.objectHandle = reinterpret_cast<uint64_t>(view_handle);
+ name_info.pObjectName = view_name.c_str();
+ vk.SetDebugUtilsObjectName(name_info);
+#endif
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURE_BACKEND_H_
+#define MSP_GL_TEXTURE_BACKEND_H_
+
+#include <msp/core/noncopyable.h>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Device;
+
+class VulkanTexture: public NonCopyable
+{
+ friend class VulkanFramebuffer;
+ friend class VulkanPipelineState;
+
+protected:
+ Device &device;
+ VkImage handle = 0;
+ VkImageView view_handle = 0;
+ unsigned view_type;
+ std::string debug_name;
+
+ VulkanTexture(unsigned);
+ VulkanTexture(VulkanTexture &&);
+ ~VulkanTexture();
+
+ void allocate();
+ void require_swizzle() { }
+
+ void generate_mipmap();
+
+ void set_debug_name(const std::string &);
+ void set_vulkan_object_names() const;
+};
+
+using TextureBackend = VulkanTexture;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "texturecube_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanTextureCube::VulkanTextureCube():
+ Texture(VK_IMAGE_VIEW_TYPE_CUBE)
+{
+	throw logic_error("VulkanTextureCube is unimplemented");
+}
+
+void VulkanTextureCube::sub_image(unsigned, unsigned, int, int, unsigned, unsigned, const void *)
+{
+}
+
+size_t VulkanTextureCube::get_data_size() const
+{
+ return 0;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_TEXTURECUBE_BACKEND_H_
+#define MSP_GL_TEXTURECUBE_BACKEND_H_
+
+#include "texture.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanTextureCube: public Texture
+{
+protected:
+ VulkanTextureCube();
+
+ void sub_image(unsigned, unsigned, int, int, unsigned, unsigned, const void *);
+
+public:
+ virtual AsyncLoader *load(IO::Seekable &, const Resources * = 0) { return 0; }
+ virtual std::size_t get_data_size() const;
+ virtual void unload() { }
+};
+
+using TextureCubeBackend = VulkanTextureCube;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/core/algorithm.h>
+#include "device.h"
+#include "transferqueue.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+TransferQueue::TransferQueue(Device &d):
+ device(d)
+{ }
+
+TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(size_t size)
+{
+ auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
+ if(i==buffers.end())
+ {
+ buffers.emplace_back(device, max(default_buffer_size, size));
+ i = prev(buffers.end());
+ }
+
+ PendingTransfer transfer;
+ transfer.buffer_index = distance(buffers.begin(), i);
+ transfer.offset = i->used;
+ transfer.size = size;
+ transfers.push_back(transfer);
+
+ i->used += size;
+
+ return transfers.back();
+}
+
+void TransferQueue::dispatch_transfers(VkCommandBuffer command_buffer)
+{
+ for(const PendingTransfer &t: transfers)
+ {
+ VkBuffer buffer = buffers[t.buffer_index].buffer;
+ t.callback(command_buffer, buffer, t.offset);
+ }
+
+ transfers.clear();
+}
+
+
+TransferQueue::StagingBuffer::StagingBuffer(Device &d, size_t s):
+ device(d),
+ size(s)
+{
+ const VulkanFunctions &vk = device.get_functions();
+
+ VkBufferCreateInfo buffer_info = { };
+ buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ buffer_info.size = size;
+ buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ vk.CreateBuffer(buffer_info, buffer);
+
+ MemoryAllocator &allocator = device.get_allocator();
+ memory_id = allocator.allocate(buffer, STAGING_MEMORY);
+ mapped_address = allocator.map(memory_id, 0, size);
+}
+
+TransferQueue::StagingBuffer::StagingBuffer(StagingBuffer &&other):
+ device(other.device),
+ buffer(other.buffer),
+ memory_id(other.memory_id),
+ size(other.size),
+ used(other.used),
+ mapped_address(other.mapped_address)
+{
+ other.buffer = 0;
+ other.memory_id = 0;
+ other.mapped_address = 0;
+}
+
+TransferQueue::StagingBuffer::~StagingBuffer()
+{
+ const VulkanFunctions &vk = device.get_functions();
+ MemoryAllocator &allocator = device.get_allocator();
+
+ if(mapped_address)
+ {
+ allocator.unmap(mapped_address);
+ allocator.release(memory_id);
+ }
+ if(buffer)
+ vk.DestroyBuffer(buffer);
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_TRANSFERQUEUE_H_
+#define MSP_GL_VULKAN_TRANSFERQUEUE_H_
+
+#include <functional>
+#include <vector>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class Buffer;
+class Device;
+
+class TransferQueue
+{
+private:
+ struct StagingBuffer
+ {
+ Device &device;
+ VkBuffer buffer = 0;
+ unsigned memory_id = 0;
+ std::size_t size = 0;
+ std::size_t used = 0;
+ void *mapped_address = 0;
+
+ StagingBuffer(Device &, std::size_t);
+ StagingBuffer(StagingBuffer &&);
+ ~StagingBuffer();
+ };
+
+ using TransferCallback = void(VkCommandBuffer, VkBuffer, std::size_t);
+
+ struct PendingTransfer
+ {
+ unsigned buffer_index = 0;
+ std::size_t offset = 0;
+ std::size_t size = 0;
+ std::function<TransferCallback> callback;
+ };
+
+ Device &device;
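+	// 16 MiB; transfers larger than this get a staging buffer of their own size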
+ std::size_t default_buffer_size = 16*1048576;
+ std::vector<StagingBuffer> buffers;
+ std::vector<PendingTransfer> transfers;
+
+public:
+ TransferQueue(Device &);
+
+ template<typename T>
+ void *prepare_transfer(std::size_t, T &&);
+
+private:
+ PendingTransfer &prepare_transfer(std::size_t);
+
+public:
+ void dispatch_transfers(VkCommandBuffer);
+};
+
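+/* Returns a pointer to mapped staging memory for the caller to fill; the
+callback runs later, when dispatch_transfers() records the copy into a command
+buffer.  A hypothetical caller:
+
+    void *staging = queue.prepare_transfer(size,
+        [=](VkCommandBuffer cmd, VkBuffer src, std::size_t offset)
+        { record_copy(cmd, src, offset); });  // record_copy is illustrative
+    std::memcpy(staging, data, size);
+*/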
+template<typename T>
+void *TransferQueue::prepare_transfer(std::size_t size, T &&callback)
+{
+ PendingTransfer &pt = prepare_transfer(size);
+ pt.callback = std::forward<T>(callback);
+ return static_cast<char *>(buffers[pt.buffer_index].mapped_address)+pt.offset;
+}
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#ifndef MSP_GL_UNIFORMBLOCK_BACKEND_H_
+#define MSP_GL_UNIFORMBLOCK_BACKEND_H_
+
+namespace Msp {
+namespace GL {
+
+class VulkanUniformBlock
+{
+protected:
+ VulkanUniformBlock(bool) { }
+ ~VulkanUniformBlock() = default;
+};
+
+using UniformBlockBackend = VulkanUniformBlock;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "vertexformat.h"
+#include "vertexformat_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_attribute_format(VertexAttribute a)
+{
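+	// The low nine bits of a VertexAttribute are assumed to encode component
+	// count and data type (see vertexformat.h); the semantic in the higher
+	// bits does not affect the Vulkan format.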
+ switch(a&0x1FF)
+ {
+ case 0x011: return VK_FORMAT_R8_UNORM;
+ case 0x012: return VK_FORMAT_R8G8_UNORM;
+ case 0x013: return VK_FORMAT_R8G8B8_UNORM;
+ case 0x014: return VK_FORMAT_R8G8B8A8_UNORM;
+ case 0x019: return VK_FORMAT_R8_UINT;
+ case 0x01A: return VK_FORMAT_R8G8_UINT;
+ case 0x01B: return VK_FORMAT_R8G8B8_UINT;
+ case 0x01C: return VK_FORMAT_R8G8B8A8_UINT;
+ case 0x021: return VK_FORMAT_R16_UNORM;
+ case 0x022: return VK_FORMAT_R16G16_UNORM;
+ case 0x023: return VK_FORMAT_R16G16B16_UNORM;
+ case 0x024: return VK_FORMAT_R16G16B16A16_UNORM;
+ case 0x029: return VK_FORMAT_R16_UINT;
+ case 0x02A: return VK_FORMAT_R16G16_UINT;
+ case 0x02B: return VK_FORMAT_R16G16B16_UINT;
+ case 0x02C: return VK_FORMAT_R16G16B16A16_UINT;
+ case 0x049: return VK_FORMAT_R32_UINT;
+ case 0x04A: return VK_FORMAT_R32G32_UINT;
+ case 0x04B: return VK_FORMAT_R32G32B32_UINT;
+ case 0x04C: return VK_FORMAT_R32G32B32A32_UINT;
+ case 0x091: return VK_FORMAT_R8_SNORM;
+ case 0x092: return VK_FORMAT_R8G8_SNORM;
+ case 0x093: return VK_FORMAT_R8G8B8_SNORM;
+ case 0x094: return VK_FORMAT_R8G8B8A8_SNORM;
+ case 0x099: return VK_FORMAT_R8_SINT;
+ case 0x09A: return VK_FORMAT_R8G8_SINT;
+ case 0x09B: return VK_FORMAT_R8G8B8_SINT;
+ case 0x09C: return VK_FORMAT_R8G8B8A8_SINT;
+ case 0x0A1: return VK_FORMAT_R16_SNORM;
+ case 0x0A2: return VK_FORMAT_R16G16_SNORM;
+ case 0x0A3: return VK_FORMAT_R16G16B16_SNORM;
+ case 0x0A4: return VK_FORMAT_R16G16B16A16_SNORM;
+ case 0x0A9: return VK_FORMAT_R16_SINT;
+ case 0x0AA: return VK_FORMAT_R16G16_SINT;
+ case 0x0AB: return VK_FORMAT_R16G16B16_SINT;
+ case 0x0AC: return VK_FORMAT_R16G16B16A16_SINT;
+ case 0x0C9: return VK_FORMAT_R32_SINT;
+ case 0x0CA: return VK_FORMAT_R32G32_SINT;
+ case 0x0CB: return VK_FORMAT_R32G32B32_SINT;
+ case 0x0CC: return VK_FORMAT_R32G32B32A32_SINT;
+ case 0x1C1: return VK_FORMAT_R32_SFLOAT;
+ case 0x1C2: return VK_FORMAT_R32G32_SFLOAT;
+ case 0x1C3: return VK_FORMAT_R32G32B32_SFLOAT;
+ case 0x1C4: return VK_FORMAT_R32G32B32A32_SFLOAT;
+ default: throw invalid_argument("get_vulkan_attribute_format");
+ }
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VERTEXFORMAT_BACKEND_H_
+#define MSP_GL_VERTEXFORMAT_BACKEND_H_
+
+#ifndef MSP_GL_VERTEXFORMAT_H_
+#error "vertexformat_backend.h requires vertexformat.h"
+#endif
+
+namespace Msp {
+namespace GL {
+
+unsigned get_vulkan_attribute_format(VertexAttribute);
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/core/hash.h>
+#include "buffer.h"
+#include "structurebuilder.h"
+#include "vertexarray.h"
+#include "vertexformat.h"
+#include "vertexsetup.h"
+#include "vertexsetup_backend.h"
+#include "vulkan.h"
+
+using namespace std;
+
+namespace Msp {
+namespace GL {
+
+VulkanVertexSetup::VulkanVertexSetup(VulkanVertexSetup &&other):
+ creation_info(move(other.creation_info))
+{ }
+
+void VulkanVertexSetup::update(unsigned) const
+{
+ const VertexSetup &self = *static_cast<const VertexSetup *>(this);
+
+ n_bindings = (self.inst_format.empty() ? 1 : 2);
+ unsigned n_attribs = 0;
+ for(VertexAttribute a: self.vertex_format)
+ n_attribs += !is_padding(a);
+ for(VertexAttribute a: self.inst_format)
+ n_attribs += !is_padding(a);
+
+ StructureBuilder sb(creation_info, 3);
+ VkPipelineVertexInputStateCreateInfo *&input_state = sb.add<VkPipelineVertexInputStateCreateInfo>();
+ VkVertexInputBindingDescription *&bindings = sb.add<VkVertexInputBindingDescription>(n_bindings);
+ VkVertexInputAttributeDescription *&attribs = sb.add<VkVertexInputAttributeDescription>(n_attribs);
+
+ bindings[0].binding = 0;
+ bindings[0].stride = self.vertex_format.stride();
+ bindings[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+ update_attributes(self.vertex_format, 0, attribs);
+
+ if(!self.inst_format.empty())
+ {
+ bindings[1].binding = 1;
+ bindings[1].stride = self.inst_format.stride();
+ bindings[1].inputRate = VK_VERTEX_INPUT_RATE_INSTANCE;
+ update_attributes(self.inst_format, 1, attribs+self.vertex_format.size());
+ }
+
+ input_state->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ input_state->vertexBindingDescriptionCount = n_bindings;
+ input_state->pVertexBindingDescriptions = bindings;
+ input_state->vertexAttributeDescriptionCount = n_attribs;
+ input_state->pVertexAttributeDescriptions = attribs;
+
+ buffers[0] = self.vertex_array->get_buffer()->handle;
+ offsets[0] = self.vertex_array->get_offset();
+
+ if(self.inst_array)
+ {
+ buffers[1] = self.inst_array->get_buffer()->handle;
+ offsets[1] = self.inst_array->get_offset();
+ }
+}
+
+void VulkanVertexSetup::update_attributes(const VertexFormat &fmt, unsigned binding, void *buffer) const
+{
+ VkVertexInputAttributeDescription *attrib = reinterpret_cast<VkVertexInputAttributeDescription *>(buffer);
+ unsigned offset = 0;
+ for(VertexAttribute a: fmt)
+ {
+ if(!is_padding(a))
+ {
+ attrib->location = get_attribute_semantic(a);
+ attrib->binding = binding;
+ attrib->format = static_cast<VkFormat>(get_vulkan_attribute_format(a));
+ attrib->offset = offset;
+ ++attrib;
+ }
+ offset += get_attribute_size(a);
+ }
+}
+
+uint64_t VulkanVertexSetup::compute_hash() const
+{
+ const VertexSetup &self = *static_cast<const VertexSetup *>(this);
+
+ uint64_t result = hash<64>(0, 0);
+ for(VertexAttribute a: self.vertex_format)
+ result = hash_update<64>(result, a);
+ for(VertexAttribute a: self.inst_format)
+ result = hash_update<64>(result, a);
+
+ return result;
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VERTEXSETUP_BACKEND_H_
+#define MSP_GL_VERTEXSETUP_BACKEND_H_
+
+#include <cstdint>
+#include <vector>
+#include <msp/core/noncopyable.h>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+class VertexArray;
+class VertexFormat;
+
+class VulkanVertexSetup: public NonCopyable
+{
+ friend class VulkanPipelineState;
+
+protected:
+ mutable std::vector<char> creation_info;
+	mutable std::uint32_t n_bindings = 0;
+	mutable VkBuffer buffers[2] = { };
+	mutable std::uint64_t offsets[2] = { };
+
+ VulkanVertexSetup() = default;
+ VulkanVertexSetup(VulkanVertexSetup &&);
+ ~VulkanVertexSetup() = default;
+
+ static void require_format(const VertexFormat &, bool) { }
+ void update(unsigned) const;
+ void update_attributes(const VertexFormat &, unsigned, void *) const;
+ std::uint64_t compute_hash() const;
+
+ void unload() { }
+
+ void set_debug_name(const std::string &) { }
+};
+
+using VertexSetupBackend = VulkanVertexSetup;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include <msp/graphics/vulkancontext_platform.h>
+#include "vulkan.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanFunctions::VulkanFunctions(const Graphics::VulkanContext &c):
+ context(c),
+ device(context.get_private().device),
+ physicalDevice(context.get_private().physical_device),
+ graphicsQueue(context.get_private().graphics_queue),
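+	// The numbered comments below presumably refer to chapters of the Vulkan
+	// specification where each group of entry points is defined.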
+ // 5
+ vkGetPhysicalDeviceProperties(context.get_function<PFN_vkGetPhysicalDeviceProperties>("vkGetPhysicalDeviceProperties")),
+ // 6
+ vkCreateCommandPool(context.get_function<PFN_vkCreateCommandPool>("vkCreateCommandPool")),
+ vkResetCommandPool(context.get_function<PFN_vkResetCommandPool>("vkResetCommandPool")),
+ vkDestroyCommandPool(context.get_function<PFN_vkDestroyCommandPool>("vkDestroyCommandPool")),
+ vkAllocateCommandBuffers(context.get_function<PFN_vkAllocateCommandBuffers>("vkAllocateCommandBuffers")),
+ vkBeginCommandBuffer(context.get_function<PFN_vkBeginCommandBuffer>("vkBeginCommandBuffer")),
+ vkEndCommandBuffer(context.get_function<PFN_vkEndCommandBuffer>("vkEndCommandBuffer")),
+ vkQueueSubmit(context.get_function<PFN_vkQueueSubmit>("vkQueueSubmit")),
+ // 7
+ vkCreateFence(context.get_function<PFN_vkCreateFence>("vkCreateFence")),
+ vkDestroyFence(context.get_function<PFN_vkDestroyFence>("vkDestroyFence")),
+ vkGetFenceStatus(context.get_function<PFN_vkGetFenceStatus>("vkGetFenceStatus")),
+ vkResetFences(context.get_function<PFN_vkResetFences>("vkResetFences")),
+ vkWaitForFences(context.get_function<PFN_vkWaitForFences>("vkWaitForFences")),
+ vkCreateSemaphore(context.get_function<PFN_vkCreateSemaphore>("vkCreateSemaphore")),
+ vkDestroySemaphore(context.get_function<PFN_vkDestroySemaphore>("vkDestroySemaphore")),
+ vkCmdPipelineBarrier(context.get_function<PFN_vkCmdPipelineBarrier>("vkCmdPipelineBarrier")),
+ vkQueueWaitIdle(context.get_function<PFN_vkQueueWaitIdle>("vkQueueWaitIdle")),
+ // 8
+ vkCreateRenderPass(context.get_function<PFN_vkCreateRenderPass>("vkCreateRenderPass")),
+ vkDestroyRenderPass(context.get_function<PFN_vkDestroyRenderPass>("vkDestroyRenderPass")),
+ vkCreateFramebuffer(context.get_function<PFN_vkCreateFramebuffer>("vkCreateFramebuffer")),
+ vkDestroyFramebuffer(context.get_function<PFN_vkDestroyFramebuffer>("vkDestroyFramebuffer")),
+ vkCmdBeginRenderPass(context.get_function<PFN_vkCmdBeginRenderPass>("vkCmdBeginRenderPass")),
+ vkCmdEndRenderPass(context.get_function<PFN_vkCmdEndRenderPass>("vkCmdEndRenderPass")),
+ // 9
+ vkCreateShaderModule(context.get_function<PFN_vkCreateShaderModule>("vkCreateShaderModule")),
+ vkDestroyShaderModule(context.get_function<PFN_vkDestroyShaderModule>("vkDestroyShaderModule")),
+ // 10
+ vkCreateGraphicsPipelines(context.get_function<PFN_vkCreateGraphicsPipelines>("vkCreateGraphicsPipelines")),
+ vkDestroyPipeline(context.get_function<PFN_vkDestroyPipeline>("vkDestroyPipeline")),
+ vkCmdBindPipeline(context.get_function<PFN_vkCmdBindPipeline>("vkCmdBindPipeline")),
+ // 11
+ vkGetPhysicalDeviceMemoryProperties(context.get_function<PFN_vkGetPhysicalDeviceMemoryProperties>("vkGetPhysicalDeviceMemoryProperties")),
+ vkAllocateMemory(context.get_function<PFN_vkAllocateMemory>("vkAllocateMemory")),
+ vkFreeMemory(context.get_function<PFN_vkFreeMemory>("vkFreeMemory")),
+ vkMapMemory(context.get_function<PFN_vkMapMemory>("vkMapMemory")),
+ vkUnmapMemory(context.get_function<PFN_vkUnmapMemory>("vkUnmapMemory")),
+ // 12
+ vkCreateBuffer(context.get_function<PFN_vkCreateBuffer>("vkCreateBuffer")),
+ vkDestroyBuffer(context.get_function<PFN_vkDestroyBuffer>("vkDestroyBuffer")),
+ vkCreateImageView(context.get_function<PFN_vkCreateImageView>("vkCreateImageView")),
+ vkDestroyImageView(context.get_function<PFN_vkDestroyImageView>("vkDestroyImageView")),
+ vkGetBufferMemoryRequirements(context.get_function<PFN_vkGetBufferMemoryRequirements>("vkGetBufferMemoryRequirements")),
+ vkBindBufferMemory(context.get_function<PFN_vkBindBufferMemory>("vkBindBufferMemory")),
+ // 14
+ vkCreateDescriptorSetLayout(context.get_function<PFN_vkCreateDescriptorSetLayout>("vkCreateDescriptorSetLayout")),
+ vkDestroyDescriptorSetLayout(context.get_function<PFN_vkDestroyDescriptorSetLayout>("vkDestroyDescriptorSetLayout")),
+ vkCreatePipelineLayout(context.get_function<PFN_vkCreatePipelineLayout>("vkCreatePipelineLayout")),
+ vkDestroyPipelineLayout(context.get_function<PFN_vkDestroyPipelineLayout>("vkDestroyPipelineLayout")),
+ vkCreateDescriptorPool(context.get_function<PFN_vkCreateDescriptorPool>("vkCreateDescriptorPool")),
+ vkDestroyDescriptorPool(context.get_function<PFN_vkDestroyDescriptorPool>("vkDestroyDescriptorPool")),
+ vkAllocateDescriptorSets(context.get_function<PFN_vkAllocateDescriptorSets>("vkAllocateDescriptorSets")),
+ vkUpdateDescriptorSets(context.get_function<PFN_vkUpdateDescriptorSets>("vkUpdateDescriptorSets")),
+ vkCmdBindDescriptorSets(context.get_function<PFN_vkCmdBindDescriptorSets>("vkCmdBindDescriptorSets")),
+ vkCmdPushConstants(context.get_function<PFN_vkCmdPushConstants>("vkCmdPushConstants")),
+	// Chapter 19: Copy Commands
+ vkCmdCopyBuffer(context.get_function<PFN_vkCmdCopyBuffer>("vkCmdCopyBuffer")),
+	// Chapter 20: Drawing Commands
+ vkCmdBindIndexBuffer(context.get_function<PFN_vkCmdBindIndexBuffer>("vkCmdBindIndexBuffer")),
+ vkCmdDrawIndexed(context.get_function<PFN_vkCmdDrawIndexed>("vkCmdDrawIndexed")),
+	// Chapter 21: Fixed-Function Vertex Processing
+ vkCmdBindVertexBuffers(context.get_function<PFN_vkCmdBindVertexBuffers>("vkCmdBindVertexBuffers")),
+	// Chapter 24: Fixed-Function Vertex Post-Processing
+ vkCmdSetViewport(context.get_function<PFN_vkCmdSetViewport>("vkCmdSetViewport")),
+	// Chapter 26: Fragment Operations
+ vkCmdSetScissor(context.get_function<PFN_vkCmdSetScissor>("vkCmdSetScissor")),
+	// Chapter 30: Window System Integration (WSI)
+ vkGetPhysicalDeviceSurfaceCapabilities(context.get_function<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>("vkGetPhysicalDeviceSurfaceCapabilitiesKHR")),
+ vkGetPhysicalDeviceSurfaceFormats(context.get_function<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>("vkGetPhysicalDeviceSurfaceFormatsKHR")),
+ vkGetPhysicalDeviceSurfacePresentModes(context.get_function<PFN_vkGetPhysicalDeviceSurfacePresentModesKHR>("vkGetPhysicalDeviceSurfacePresentModesKHR")),
+ vkCreateSwapchain(context.get_function<PFN_vkCreateSwapchainKHR>("vkCreateSwapchainKHR")),
+ vkDestroySwapchain(context.get_function<PFN_vkDestroySwapchainKHR>("vkDestroySwapchainKHR")),
+ vkGetSwapchainImages(context.get_function<PFN_vkGetSwapchainImagesKHR>("vkGetSwapchainImagesKHR")),
+ vkAcquireNextImage(context.get_function<PFN_vkAcquireNextImageKHR>("vkAcquireNextImageKHR")),
+ vkQueuePresent(context.get_function<PFN_vkQueuePresentKHR>("vkQueuePresentKHR")),
+	// Chapter 39: Formats
+ vkGetPhysicalDeviceFormatProperties(context.get_function<PFN_vkGetPhysicalDeviceFormatProperties>("vkGetPhysicalDeviceFormatProperties")),
+	// Chapter 45: Debugging
+ vkSetDebugUtilsObjectName(context.get_function<PFN_vkSetDebugUtilsObjectNameEXT>("vkSetDebugUtilsObjectNameEXT"))
+{ }
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_VULKAN_H_
+#define MSP_GL_VULKAN_H_
+
+#include <cstdint>
+#include <type_traits>
+#include <vulkan/vulkan.h>
+#include <msp/graphics/vulkancontext.h>
+#include "handles.h"
+
+namespace Msp {
+namespace GL {
+
+template<typename T> struct HandleTraits;
+template<> struct HandleTraits<VkBuffer> { using NativeHandle = ::VkBuffer; };
+template<> struct HandleTraits<::VkBuffer> { using Wrapper = VkBuffer; };
+template<> struct HandleTraits<VkCommandBuffer> { using NativeHandle = ::VkCommandBuffer; };
+template<> struct HandleTraits<::VkCommandBuffer> { using Wrapper = VkCommandBuffer; };
+template<> struct HandleTraits<VkCommandPool> { using NativeHandle = ::VkCommandPool; };
+template<> struct HandleTraits<::VkCommandPool> { using Wrapper = VkCommandPool; };
+template<> struct HandleTraits<VkDevice> { using NativeHandle = ::VkDevice; };
+template<> struct HandleTraits<::VkDevice> { using Wrapper = VkDevice; };
+template<> struct HandleTraits<VkDescriptorPool> { using NativeHandle = ::VkDescriptorPool; };
+template<> struct HandleTraits<::VkDescriptorPool> { using Wrapper = VkDescriptorPool; };
+template<> struct HandleTraits<VkDescriptorSet> { using NativeHandle = ::VkDescriptorSet; };
+template<> struct HandleTraits<::VkDescriptorSet> { using Wrapper = VkDescriptorSet; };
+template<> struct HandleTraits<VkDescriptorSetLayout> { using NativeHandle = ::VkDescriptorSetLayout; };
+template<> struct HandleTraits<::VkDescriptorSetLayout> { using Wrapper = VkDescriptorSetLayout; };
+template<> struct HandleTraits<VkDeviceMemory> { using NativeHandle = ::VkDeviceMemory; };
+template<> struct HandleTraits<::VkDeviceMemory> { using Wrapper = VkDeviceMemory; };
+template<> struct HandleTraits<VkFence> { using NativeHandle = ::VkFence; };
+template<> struct HandleTraits<::VkFence> { using Wrapper = VkFence; };
+template<> struct HandleTraits<VkFramebuffer> { using NativeHandle = ::VkFramebuffer; };
+template<> struct HandleTraits<::VkFramebuffer> { using Wrapper = VkFramebuffer; };
+template<> struct HandleTraits<VkImage> { using NativeHandle = ::VkImage; };
+template<> struct HandleTraits<::VkImage> { using Wrapper = VkImage; };
+template<> struct HandleTraits<VkImageView> { using NativeHandle = ::VkImageView; };
+template<> struct HandleTraits<::VkImageView> { using Wrapper = VkImageView; };
+template<> struct HandleTraits<VkPhysicalDevice> { using NativeHandle = ::VkPhysicalDevice; };
+template<> struct HandleTraits<::VkPhysicalDevice> { using Wrapper = VkPhysicalDevice; };
+template<> struct HandleTraits<VkPipeline> { using NativeHandle = ::VkPipeline; };
+template<> struct HandleTraits<::VkPipeline> { using Wrapper = VkPipeline; };
+template<> struct HandleTraits<VkPipelineCache> { using NativeHandle = ::VkPipelineCache; };
+template<> struct HandleTraits<::VkPipelineCache> { using Wrapper = VkPipelineCache; };
+template<> struct HandleTraits<VkPipelineLayout> { using NativeHandle = ::VkPipelineLayout; };
+template<> struct HandleTraits<::VkPipelineLayout> { using Wrapper = VkPipelineLayout; };
+template<> struct HandleTraits<VkQueue> { using NativeHandle = ::VkQueue; };
+template<> struct HandleTraits<::VkQueue> { using Wrapper = VkQueue; };
+template<> struct HandleTraits<VkRenderPass> { using NativeHandle = ::VkRenderPass; };
+template<> struct HandleTraits<::VkRenderPass> { using Wrapper = VkRenderPass; };
+template<> struct HandleTraits<VkSampler> { using NativeHandle = ::VkSampler; };
+template<> struct HandleTraits<::VkSampler> { using Wrapper = VkSampler; };
+template<> struct HandleTraits<VkSemaphore> { using NativeHandle = ::VkSemaphore; };
+template<> struct HandleTraits<::VkSemaphore> { using Wrapper = VkSemaphore; };
+template<> struct HandleTraits<VkShaderModule> { using NativeHandle = ::VkShaderModule; };
+template<> struct HandleTraits<::VkShaderModule> { using Wrapper = VkShaderModule; };
+template<> struct HandleTraits<VkSurface> { using NativeHandle = ::VkSurfaceKHR; };
+template<> struct HandleTraits<::VkSurfaceKHR> { using Wrapper = VkSurface; };
+template<> struct HandleTraits<VkSwapchain> { using NativeHandle = ::VkSwapchainKHR; };
+template<> struct HandleTraits<::VkSwapchainKHR> { using Wrapper = VkSwapchain; };
+
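+// handle_cast converts between the wrapper handles declared in handles.h and
+// the native Vulkan handles, in either direction; the pointer overloads cover
+// output parameters and handle arrays.  All of them are plain reinterpret_casts
+// guided by the HandleTraits mapping above.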
+template<typename T>
+T handle_cast(typename HandleTraits<T>::Wrapper handle)
+{ return reinterpret_cast<T>(handle); }
+
+template<typename T>
+T handle_cast(typename HandleTraits<T>::NativeHandle handle)
+{ return reinterpret_cast<T>(handle); }
+
+template<typename T>
+T handle_cast(typename HandleTraits<typename std::remove_pointer<T>::type>::Wrapper *handle)
+{ return reinterpret_cast<T>(handle); }
+
+template<typename T>
+T handle_cast(const typename HandleTraits<typename std::remove_const<typename std::remove_pointer<T>::type>::type>::Wrapper *handle)
+{ return reinterpret_cast<T>(handle); }
+
+
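+/* Wraps a VkResult so errors cannot be silently dropped: a Result destroyed
+without having been checked performs the check itself and throws on failure.
+A hypothetical caller might use it like this:
+
+    vk.CreateFence(create_info, fence);          // unchecked; throws at end of statement on failure
+    if(vk.GetFenceStatus(fence)==VK_NOT_READY)   // operator== marks the result as checked
+        ...
+*/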
+class Result
+{
+private:
+ VkResult result;
+ const char *function;
+ bool checked = false;
+
+public:
+ Result(VkResult r, const char *f): result(r), function(f) { }
+	Result(Result &&r): result(r.result), function(r.function), checked(r.checked) { r.result = VK_SUCCESS; }
+ ~Result() noexcept(false) { if(!checked) check(); }
+
+ bool operator==(VkResult r) { checked = true; return result==r; }
+	void check() { checked = true; if(result!=VK_SUCCESS) throw Graphics::vulkan_error(result, function); }
+};
+
+
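+/* Loads the needed Vulkan entry points from the context at construction and
+exposes them as thin wrappers which supply the device, physical device and
+queue handles and convert the remaining handle arguments.  The numeric
+comments refer to chapter and section numbers in the Vulkan specification. */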
+class VulkanFunctions
+{
+private:
+ const Graphics::VulkanContext &context;
+ ::VkDevice device;
+ ::VkPhysicalDevice physicalDevice;
+ ::VkQueue graphicsQueue;
+
+ PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties = 0; // 5.1
+ PFN_vkCreateCommandPool vkCreateCommandPool = 0; // 6.2
+ PFN_vkResetCommandPool vkResetCommandPool = 0; // 6.2
+ PFN_vkDestroyCommandPool vkDestroyCommandPool = 0; // 6.2
+ PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers = 0; // 6.3
+ PFN_vkBeginCommandBuffer vkBeginCommandBuffer = 0; // 6.4
+ PFN_vkEndCommandBuffer vkEndCommandBuffer = 0; // 6.4
+ PFN_vkQueueSubmit vkQueueSubmit = 0; // 6.5
+ PFN_vkCreateFence vkCreateFence = 0; // 7.3
+ PFN_vkDestroyFence vkDestroyFence = 0; // 7.3
+ PFN_vkGetFenceStatus vkGetFenceStatus = 0; // 7.3
+ PFN_vkResetFences vkResetFences = 0; // 7.3
+ PFN_vkWaitForFences vkWaitForFences = 0; // 7.3
+ PFN_vkCreateSemaphore vkCreateSemaphore = 0; // 7.4
+ PFN_vkDestroySemaphore vkDestroySemaphore = 0; // 7.4
+ PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier = 0; // 7.6
+ PFN_vkQueueWaitIdle vkQueueWaitIdle = 0; // 7.8
+ PFN_vkCreateRenderPass vkCreateRenderPass = 0; // 8.1
+ PFN_vkDestroyRenderPass vkDestroyRenderPass = 0; // 8.1
+ PFN_vkCreateFramebuffer vkCreateFramebuffer = 0; // 8.3
+ PFN_vkDestroyFramebuffer vkDestroyFramebuffer = 0; // 8.3
+ PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass = 0; // 8.4
+ PFN_vkCmdEndRenderPass vkCmdEndRenderPass = 0; // 8.4
+ PFN_vkCreateShaderModule vkCreateShaderModule = 0; // 9.1
+ PFN_vkDestroyShaderModule vkDestroyShaderModule = 0; // 9.1
+ PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines = 0; // 10.2
+ PFN_vkDestroyPipeline vkDestroyPipeline = 0; // 10.4
+ PFN_vkCmdBindPipeline vkCmdBindPipeline = 0; // 10.10
+ PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties = 0; // 11.2.1
+ PFN_vkAllocateMemory vkAllocateMemory = 0; // 11.2.3
+ PFN_vkFreeMemory vkFreeMemory = 0; // 11.2.8
+ PFN_vkMapMemory vkMapMemory = 0; // 11.2.9
+ PFN_vkUnmapMemory vkUnmapMemory = 0; // 11.2.9
+ PFN_vkCreateBuffer vkCreateBuffer = 0; // 12.1
+ PFN_vkDestroyBuffer vkDestroyBuffer = 0; // 12.1
+ PFN_vkCreateImageView vkCreateImageView = 0; // 12.5
+ PFN_vkDestroyImageView vkDestroyImageView = 0; // 12.5
+ PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements = 0; // 12.7
+ PFN_vkBindBufferMemory vkBindBufferMemory = 0; // 12.7
+ PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout = 0; // 14.2.1
+ PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout = 0; // 14.2.1
+ PFN_vkCreatePipelineLayout vkCreatePipelineLayout = 0; // 14.2.2
+ PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout = 0; // 14.2.2
+ PFN_vkCreateDescriptorPool vkCreateDescriptorPool = 0; // 14.2.3
+ PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool = 0; // 14.2.3
+ PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets = 0; // 14.2.3
+ PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets = 0; // 14.2.4
+ PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets = 0; // 14.2.7
+ PFN_vkCmdPushConstants vkCmdPushConstants = 0; // 14.2.10
+ PFN_vkCmdCopyBuffer vkCmdCopyBuffer = 0; // 19.2
+ PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer = 0; // 20.3
+ PFN_vkCmdDrawIndexed vkCmdDrawIndexed = 0; // 20.3
+ PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers = 0; // 21.2
+ PFN_vkCmdSetViewport vkCmdSetViewport = 0; // 24.5
+ PFN_vkCmdSetScissor vkCmdSetScissor = 0; // 26.1
+ PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilities = 0; // 30.5.1
+ PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormats = 0; // 30.5.2
+ PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModes = 0; // 30.5.3
+ PFN_vkCreateSwapchainKHR vkCreateSwapchain = 0; // 30.8
+ PFN_vkDestroySwapchainKHR vkDestroySwapchain = 0; // 30.8
+ PFN_vkGetSwapchainImagesKHR vkGetSwapchainImages = 0; // 30.8
+ PFN_vkAcquireNextImageKHR vkAcquireNextImage = 0; // 30.8
+ PFN_vkQueuePresentKHR vkQueuePresent = 0; // 30.8
+ PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties = 0; // 39.2
+ PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectName = 0; // 45.1.1
+
+public:
+ VulkanFunctions(const Graphics::VulkanContext &);
+
+ // Chapter 5: Devices and Queues
+ void GetPhysicalDeviceProperties(VkPhysicalDeviceProperties &rProperties) const
+ { vkGetPhysicalDeviceProperties(physicalDevice, &rProperties); }
+
+ // Chapter 6: Command Buffers
+ Result CreateCommandPool(const VkCommandPoolCreateInfo &rCreateInfo, VkCommandPool &rCommandPool) const
+ { return { vkCreateCommandPool(device, &rCreateInfo, 0, handle_cast<::VkCommandPool *>(&rCommandPool)), "vkCreateCommandPool" }; }
+
+ Result ResetCommandPool(VkCommandPool commandPool, VkCommandPoolResetFlags flags) const
+ { return { vkResetCommandPool(device, handle_cast<::VkCommandPool>(commandPool), flags), "vkResetCommandPool" }; }
+
+ void DestroyCommandPool(VkCommandPool commandPool) const
+ { vkDestroyCommandPool(device, handle_cast<::VkCommandPool>(commandPool), 0); }
+
+ Result AllocateCommandBuffers(const VkCommandBufferAllocateInfo &rAllocateInfo, VkCommandBuffer *pCommandBuffers) const
+ { return { vkAllocateCommandBuffers(device, &rAllocateInfo, handle_cast<::VkCommandBuffer *>(pCommandBuffers)), "vkAllocateCommandBuffers" }; }
+
+ Result BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo &rBeginInfo) const
+ { return { vkBeginCommandBuffer(handle_cast<::VkCommandBuffer>(commandBuffer), &rBeginInfo), "vkBeginCommandBuffer" }; }
+
+ Result EndCommandBuffer(VkCommandBuffer commandBuffer) const
+ { return { vkEndCommandBuffer(handle_cast<::VkCommandBuffer>(commandBuffer)), "vkEndCommandBuffer" }; }
+
+ Result QueueSubmit(std::uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) const
+ { return { vkQueueSubmit(graphicsQueue, submitCount, pSubmits, handle_cast<::VkFence>(fence)), "vkQueueSubmit" }; }
+
+ // Chapter 7: Synchronization and Cache Control
+ Result CreateFence(const VkFenceCreateInfo &rCreateInfo, VkFence &rFence) const
+ { return { vkCreateFence(device, &rCreateInfo, 0, handle_cast<::VkFence *>(&rFence)), "vkCreateFence" }; }
+
+ void DestroyFence(VkFence fence) const
+ { vkDestroyFence(device, handle_cast<::VkFence>(fence), 0); }
+
+ Result GetFenceStatus(VkFence fence) const
+ { return { vkGetFenceStatus(device, handle_cast<::VkFence>(fence)), "vkGetFenceStatus" }; }
+
+ Result ResetFences(std::uint32_t fenceCount, const VkFence *pFences) const
+ { return { vkResetFences(device, fenceCount, handle_cast<const ::VkFence *>(pFences)), "vkResetFences" }; }
+
+ Result WaitForFences(std::uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, std::uint64_t timeout) const
+ { return { vkWaitForFences(device, fenceCount, handle_cast<const ::VkFence *>(pFences), waitAll, timeout), "vkWaitForFences" }; }
+
+ Result CreateSemaphore(const VkSemaphoreCreateInfo &rCreateInfo, VkSemaphore &rSemaphore) const
+ { return { vkCreateSemaphore(device, &rCreateInfo, 0, handle_cast<::VkSemaphore *>(&rSemaphore)), "vkCreateSemaphore" }; }
+
+ void DestroySemaphore(VkSemaphore semaphore) const
+ { vkDestroySemaphore(device, handle_cast<::VkSemaphore>(semaphore), 0); }
+
+	void CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, std::uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, std::uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, std::uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const
+ { vkCmdPipelineBarrier(handle_cast<::VkCommandBuffer>(commandBuffer), srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); }
+
+ Result QueueWaitIdle() const
+ { return { vkQueueWaitIdle(graphicsQueue), "vkQueueWaitIdle" }; }
+
+ // Chapter 8: Render Pass
+ Result CreateRenderPass(const VkRenderPassCreateInfo &rCreateInfo, VkRenderPass &rRenderPass) const
+ { return { vkCreateRenderPass(device, &rCreateInfo, 0, handle_cast<::VkRenderPass *>(&rRenderPass)), "vkCreateRenderPass" }; }
+
+ void DestroyRenderPass(VkRenderPass renderPass) const
+ { vkDestroyRenderPass(device, handle_cast<::VkRenderPass>(renderPass), 0); }
+
+ Result CreateFramebuffer(const VkFramebufferCreateInfo &rCreateInfo, VkFramebuffer &rFramebuffer) const
+ { return { vkCreateFramebuffer(device, &rCreateInfo, 0, handle_cast<::VkFramebuffer *>(&rFramebuffer)), "vkCreateFramebuffer" }; }
+
+ void DestroyFramebuffer(VkFramebuffer framebuffer) const
+ { vkDestroyFramebuffer(device, handle_cast<::VkFramebuffer>(framebuffer), 0); }
+
+ void CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo &rRenderPassBegin, VkSubpassContents contents) const
+ { vkCmdBeginRenderPass(handle_cast<::VkCommandBuffer>(commandBuffer), &rRenderPassBegin, contents); }
+
+ void CmdEndRenderPass(VkCommandBuffer commandBuffer) const
+ { vkCmdEndRenderPass(handle_cast<::VkCommandBuffer>(commandBuffer)); }
+
+ // Chapter 9: Shaders
+ Result CreateShaderModule(const VkShaderModuleCreateInfo &rCreateInfo, VkShaderModule &rShaderModule) const
+ { return { vkCreateShaderModule(device, &rCreateInfo, 0, handle_cast<::VkShaderModule *>(&rShaderModule)), "vkCreateShaderModule" }; }
+
+ void DestroyShaderModule(VkShaderModule shaderModule) const
+ { vkDestroyShaderModule(device, handle_cast<::VkShaderModule>(shaderModule), 0); }
+
+ // Chapter 10: Pipelines
+ Result CreateGraphicsPipelines(VkPipelineCache pipelineCache, std::uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo *pCreateInfos, VkPipeline *pPipelines) const
+ { return { vkCreateGraphicsPipelines(device, handle_cast<::VkPipelineCache>(pipelineCache), createInfoCount, pCreateInfos, 0, handle_cast<::VkPipeline *>(pPipelines)), "vkCreateGraphicsPipelines" }; }
+
+ void DestroyPipeline(VkPipeline pipeline) const
+ { vkDestroyPipeline(device, handle_cast<::VkPipeline>(pipeline), 0); }
+
+ void CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const
+ { vkCmdBindPipeline(handle_cast<::VkCommandBuffer>(commandBuffer), pipelineBindPoint, handle_cast<::VkPipeline>(pipeline)); }
+
+ // Chapter 11: Memory Allocation
+ void GetPhysicalDeviceMemoryProperties(VkPhysicalDeviceMemoryProperties &rMemoryProperties) const
+ { vkGetPhysicalDeviceMemoryProperties(physicalDevice, &rMemoryProperties); }
+
+ Result AllocateMemory(const VkMemoryAllocateInfo &rAllocateInfo, VkDeviceMemory &rMemory) const
+ { return { vkAllocateMemory(device, &rAllocateInfo, 0, handle_cast<::VkDeviceMemory *>(&rMemory)), "vkAllocateMemory" }; }
+
+ Result MapMemory(VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void **ppData) const
+ { return { vkMapMemory(device, handle_cast<::VkDeviceMemory>(memory), offset, size, flags, ppData), "vkMapMemory" }; }
+
+ void UnmapMemory(VkDeviceMemory memory) const
+ { vkUnmapMemory(device, handle_cast<::VkDeviceMemory>(memory)); }
+
+ void FreeMemory(VkDeviceMemory memory) const
+ { vkFreeMemory(device, handle_cast<::VkDeviceMemory>(memory), 0); }
+
+ // Chapter 12: Resource Creation
+ Result CreateBuffer(const VkBufferCreateInfo &rCreateInfo, VkBuffer &rBuffer) const
+ { return { vkCreateBuffer(device, &rCreateInfo, 0, handle_cast<::VkBuffer *>(&rBuffer)), "vkCreateBuffer" }; }
+
+	void DestroyBuffer(VkBuffer buffer) const
+	{ vkDestroyBuffer(device, handle_cast<::VkBuffer>(buffer), 0); }
+
+ Result CreateImageView(const VkImageViewCreateInfo &rCreateInfo, VkImageView &rView) const
+ { return { vkCreateImageView(device, &rCreateInfo, 0, handle_cast<::VkImageView *>(&rView)), "vkCreateImageView" }; }
+
+ void DestroyImageView(VkImageView imageView) const
+ { vkDestroyImageView(device, handle_cast<::VkImageView>(imageView), 0); }
+
+	void GetBufferMemoryRequirements(VkBuffer buffer, VkMemoryRequirements &rMemoryRequirements) const
+	{ vkGetBufferMemoryRequirements(device, handle_cast<::VkBuffer>(buffer), &rMemoryRequirements); }
+
+	Result BindBufferMemory(VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) const
+	{ return { vkBindBufferMemory(device, handle_cast<::VkBuffer>(buffer), handle_cast<::VkDeviceMemory>(memory), memoryOffset), "vkBindBufferMemory" }; }
+
+ // Chapter 14: Resource Descriptors
+ Result CreateDescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo &rCreateInfo, VkDescriptorSetLayout &rSetLayout) const
+ { return { vkCreateDescriptorSetLayout(device, &rCreateInfo, 0, handle_cast<::VkDescriptorSetLayout *>(&rSetLayout)), "vkCreateDescriptorSetLayout" }; }
+
+ void DestroyDescriptorSetLayout(VkDescriptorSetLayout descriptorSetLayout) const
+ { vkDestroyDescriptorSetLayout(device, handle_cast<::VkDescriptorSetLayout>(descriptorSetLayout), 0); }
+
+ Result CreatePipelineLayout(const VkPipelineLayoutCreateInfo &rCreateInfo, VkPipelineLayout &rPipelineLayout) const
+ { return { vkCreatePipelineLayout(device, &rCreateInfo, 0, handle_cast<::VkPipelineLayout *>(&rPipelineLayout)), "vkCreatePipelineLayout" }; }
+
+ void DestroyPipelineLayout(VkPipelineLayout pipelineLayout) const
+ { vkDestroyPipelineLayout(device, handle_cast<::VkPipelineLayout>(pipelineLayout), 0); }
+
+ Result CreateDescriptorPool(const VkDescriptorPoolCreateInfo &rCreateInfo, VkDescriptorPool &rDescriptorPool) const
+ { return { vkCreateDescriptorPool(device, &rCreateInfo, 0, handle_cast<::VkDescriptorPool *>(&rDescriptorPool)), "vkCreateDescriptorPool" }; }
+
+ void DestroyDescriptorPool(VkDescriptorPool descriptorPool) const
+ { vkDestroyDescriptorPool(device, handle_cast<::VkDescriptorPool>(descriptorPool), 0); }
+
+ Result AllocateDescriptorSets(const VkDescriptorSetAllocateInfo &rAllocateInfo, VkDescriptorSet *pDescriptorSets) const
+ { return { vkAllocateDescriptorSets(device, &rAllocateInfo, handle_cast<::VkDescriptorSet *>(pDescriptorSets)), "vkAllocateDescriptorSets" }; }
+
+ void UpdateDescriptorSets(std::uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, std::uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) const
+ { vkUpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); }
+
+ void CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, std::uint32_t firstSet, std::uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets, std::uint32_t dynamicOffsetCount, const std::uint32_t *pDynamicOffsets) const
+ { vkCmdBindDescriptorSets(handle_cast<::VkCommandBuffer>(commandBuffer), pipelineBindPoint, handle_cast<::VkPipelineLayout>(layout), firstSet, descriptorSetCount, handle_cast<const ::VkDescriptorSet *>(pDescriptorSets), dynamicOffsetCount, pDynamicOffsets); }
+
+ void CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, std::uint32_t offset, std::uint32_t size, const void *pValues) const
+ { vkCmdPushConstants(handle_cast<::VkCommandBuffer>(commandBuffer), handle_cast<::VkPipelineLayout>(layout), stageFlags, offset, size, pValues); }
+
+ // Chapter 19: Copy Commands
+ void CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, std::uint32_t regionCount, const VkBufferCopy *pRegions) const
+ { vkCmdCopyBuffer(handle_cast<::VkCommandBuffer>(commandBuffer), handle_cast<::VkBuffer>(srcBuffer), handle_cast<::VkBuffer>(dstBuffer), regionCount, pRegions); }
+
+ // Chapter 20: Drawing Commands
+ void CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const
+ { vkCmdBindIndexBuffer(handle_cast<::VkCommandBuffer>(commandBuffer), handle_cast<::VkBuffer>(buffer), offset, indexType); }
+
+ void CmdDrawIndexed(VkCommandBuffer commandBuffer, std::uint32_t indexCount, std::uint32_t instanceCount, std::uint32_t firstIndex, std::int32_t vertexOffset, std::uint32_t firstInstance) const
+ { vkCmdDrawIndexed(handle_cast<::VkCommandBuffer>(commandBuffer), indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); }
+
+ // Chapter 21: Fixed-Function Vertex Processing
+ void CmdBindVertexBuffers(VkCommandBuffer commandBuffer, std::uint32_t firstBinding, std::uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const
+ { vkCmdBindVertexBuffers(handle_cast<::VkCommandBuffer>(commandBuffer), firstBinding, bindingCount, handle_cast<const ::VkBuffer *>(pBuffers), pOffsets); }
+
+ // Chapter 24: Fixed-Function Vertex Post-Processing
+ void CmdSetViewport(VkCommandBuffer commandBuffer, std::uint32_t firstViewport, std::uint32_t viewportCount, const VkViewport *pViewports) const
+ { vkCmdSetViewport(handle_cast<::VkCommandBuffer>(commandBuffer), firstViewport, viewportCount, pViewports); }
+
+ // Chapter 26: Fragment Operations
+ void CmdSetScissor(VkCommandBuffer commandBuffer, std::uint32_t firstScissor, std::uint32_t scissorCount, const VkRect2D *pScissors) const
+ { vkCmdSetScissor(handle_cast<::VkCommandBuffer>(commandBuffer), firstScissor, scissorCount, pScissors); }
+
+ // Chapter 30: Window System Integration (WSI)
+ Result GetPhysicalDeviceSurfaceCapabilities(VkSurface surface, VkSurfaceCapabilitiesKHR &rSurfaceCapabilities) const
+ { return { vkGetPhysicalDeviceSurfaceCapabilities(physicalDevice, handle_cast<::VkSurfaceKHR>(surface), &rSurfaceCapabilities), "vkGetPhysicalDeviceSurfaceCapabilities" }; }
+
+ Result GetPhysicalDeviceSurfaceFormats(VkSurface surface, std::uint32_t &rSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) const
+ { return { vkGetPhysicalDeviceSurfaceFormats(physicalDevice, handle_cast<::VkSurfaceKHR>(surface), &rSurfaceFormatCount, pSurfaceFormats), "vkGetPhysicalDeviceSurfaceFormats" }; }
+
+ Result GetPhysicalDeviceSurfacePresentModes(VkSurface surface, std::uint32_t &rPresentModeCount, VkPresentModeKHR *pPresentModes) const
+ { return { vkGetPhysicalDeviceSurfacePresentModes(physicalDevice, handle_cast<::VkSurfaceKHR>(surface), &rPresentModeCount, pPresentModes), "vkGetPhysicalDeviceSurfacePresentModes" }; }
+
+ Result CreateSwapchain(const VkSwapchainCreateInfoKHR &rCreateInfo, VkSwapchain &rSwapchain) const
+ { return { vkCreateSwapchain(device, &rCreateInfo, 0, handle_cast<::VkSwapchainKHR *>(&rSwapchain)), "vkCreateSwapchain" }; }
+
+ void DestroySwapchain(VkSwapchain swapchain) const
+ { vkDestroySwapchain(device, handle_cast<::VkSwapchainKHR>(swapchain), 0); }
+
+	Result GetSwapchainImages(VkSwapchain swapchain, std::uint32_t &rSwapchainImageCount, VkImage *pSwapchainImages) const
+	{ return { vkGetSwapchainImages(device, handle_cast<::VkSwapchainKHR>(swapchain), &rSwapchainImageCount, handle_cast<::VkImage *>(pSwapchainImages)), "vkGetSwapchainImages" }; }
+
+ Result AcquireNextImage(VkSwapchain swapchain, std::uint64_t timeout, VkSemaphore semaphore, VkFence fence, std::uint32_t &rImageIndex) const
+	{ return { vkAcquireNextImage(device, handle_cast<::VkSwapchainKHR>(swapchain), timeout, handle_cast<::VkSemaphore>(semaphore), handle_cast<::VkFence>(fence), &rImageIndex), "vkAcquireNextImage" }; }
+
+ Result QueuePresent(const VkPresentInfoKHR &rPresentInfo) const
+ { return { vkQueuePresent(graphicsQueue, &rPresentInfo), "vkQueuePresent" }; }
+
+ // Chapter 39: Formats
+ void GetPhysicalDeviceFormatProperties(VkFormat format, VkFormatProperties &rFormatProperties) const
+ { vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &rFormatProperties); }
+
+ // Chapter 45: Debugging
+ Result SetDebugUtilsObjectName(const VkDebugUtilsObjectNameInfoEXT &rNameInfo) const
+ { return { vkSetDebugUtilsObjectName(device, &rNameInfo), "vkSetDebugUtilsObjectName" }; }
+};
+
+} // namespace GL
+} // namespace Msp
+
+#endif
--- /dev/null
+#include "device.h"
+#include "renderer.h"
+#include "vulkan.h"
+#include "windowview.h"
+#include "windowview_backend.h"
+
+namespace Msp {
+namespace GL {
+
+VulkanWindowView::~VulkanWindowView()
+{
+ delete swap_chain;
+}
+
+void VulkanWindowView::render()
+{
+	Device &device = static_cast<WindowView *>(this)->device;
+
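+	/* Each frame in flight uses a pair of semaphores: sem[0] is signaled when
+	the swapchain image has been acquired, and sem[1] when rendering is done
+	and the image can be presented. */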
+ Semaphore *sem = semaphores+frame_index*2;
+ unsigned image_index = swap_chain->begin_frame(sem[0]);
+ current_target = &framebuffers[image_index];
+
+ if(!internal_renderer)
+ internal_renderer = new Renderer;
+ internal_renderer->begin(sem[0]);
+ View::render(*internal_renderer);
+ internal_renderer->end(sem[1]);
+
+ swap_chain->present_frame(sem[1]);
+ frame_index = (frame_index+1)%device.get_n_frames_in_flight();
+
+ device.get_destroy_queue().tick();
+}
+
+void VulkanWindowView::resize_framebuffer(unsigned w, unsigned h)
+{
+	Device &device = static_cast<WindowView *>(this)->device;
+
+ framebuffers.clear();
+ delete swap_chain;
+
+ swap_chain = new SwapChain(w, h, device.get_n_frames_in_flight());
+
+ unsigned n_images = swap_chain->get_n_images();
+ framebuffers.reserve(n_images);
+ for(unsigned i=0; i<n_images; ++i)
+ {
+ const SwapChainTexture &image = swap_chain->get_image(i);
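+		// The comma operator is overloaded to combine an attachment point
+		// with the image's pixel format into a framebuffer format.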
+ framebuffers.emplace_back((COLOR_ATTACHMENT, image.get_format()));
+ framebuffers.back().attach(COLOR_ATTACHMENT, swap_chain->get_image(i));
+ }
+
+ current_target = &framebuffers.front();
+}
+
+} // namespace GL
+} // namespace Msp
--- /dev/null
+#ifndef MSP_GL_WINDOWVIEW_BACKEND_H_
+#define MSP_GL_WINDOWVIEW_BACKEND_H_
+
+#include "device.h"
+#include "framebuffer.h"
+#include "semaphore.h"
+#include "swapchain.h"
+#include "view.h"
+
+namespace Msp {
+namespace GL {
+
+class VulkanWindowView: public View
+{
+protected:
+ SwapChain *swap_chain = 0;
+ std::vector<Framebuffer> framebuffers;
+ Framebuffer *current_target = 0;
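+	// An acquire and a present semaphore for each potentially in-flight frame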
+ Semaphore semaphores[MAX_FRAMES_IN_FLIGHT*2];
+ unsigned frame_index = 0;
+
+ VulkanWindowView() = default;
+ ~VulkanWindowView();
+
+ virtual void render();
+ virtual const Framebuffer &get_target() const { return *current_target; }
+
+ void resize_framebuffer(unsigned, unsigned);
+};
+
+using WindowViewBackend = VulkanWindowView;
+
+} // namespace GL
+} // namespace Msp
+
+#endif
::append<uint16_t>(data, i);
}
-size_t Batch::get_index_size() const
-{
- return (index_type==UNSIGNED_INT ? sizeof(uint32_t) : sizeof(uint16_t));
-}
-
unsigned Batch::get_index(size_t i) const
{
if(index_type==UNSIGNED_INT)
virtual std::size_t get_data_size() const { return data.size(); }
virtual const void *get_data_pointer() const { return &data[0]; }
virtual std::size_t get_alignment() const { return get_index_size(); }
- std::size_t get_index_size() const;
public:
+ std::size_t get_index_size() const { return get_type_size(index_type); }
std::size_t size() const { return data.size()/get_index_size(); }
unsigned get_index(std::size_t) const;
class Commands: public CommandsBackend
{
public:
+ using CommandsBackend::begin_frame;
+ using CommandsBackend::submit_frame;
+
using CommandsBackend::use_pipeline;
using CommandsBackend::clear;
}
reflect();
+ create();
}
void SpirVModule::compile(SL::Compiler &compiler)
compiler.compile(SL::Compiler::SPIRV);
code = compiler.get_combined_spirv();
reflect();
+ create();
}
void SpirVModule::reflect()
*/
class SpirVModule: public Module, public SpirVModuleBackend
{
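+	// Presumably needed so the backend's create() can access the compiled code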
+ friend SpirVModuleBackend;
+
public:
enum Stage
{
void set_blend(const Blend *);
const Framebuffer *get_framebuffer() const { return framebuffer; }
+ const Rect *get_viewport() const { return viewport; }
const Program *get_shader_program() const { return shprog; }
const VertexSetup *get_vertex_setup() const { return vertex_setup; }
FaceWinding get_front_face() const { return front_face; }
bool operator==(const VertexFormat &) const;
bool operator!=(const VertexFormat &other) const { return !(*this==other); }
+ unsigned size() const { return count; }
bool empty() const { return !count; }
const VertexAttribute *begin() const { return attributes; }
const VertexAttribute *end() const { return attributes+count; }
} // namespace GL
} // namespace Msp
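+// Included after the class, since the backend presumably needs the complete
+// VertexFormat definition.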
+#include "vertexformat_backend.h"
+
#endif
proj_matrix = Matrix::ortho(left, right, bottom, top, clip_near, clip_far);
proj_matrix = Matrix::rotation(rotate, Vector3(0, 0, 1))*proj_matrix;
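+	/* Backend hook; the Vulkan backend presumably remaps clip space here,
+	since Vulkan's Y axis and depth range conventions differ from OpenGL's. */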
+ adjust_projection_matrix(proj_matrix);
+
shdata.uniform("clip_eye_matrix", proj_matrix);
shdata.uniform("eye_clip_matrix", invert(proj_matrix));
}
#define MSP_GL_CAMERA_H_
#include <msp/datafile/objectloader.h>
+#include "camera_backend.h"
#include "placeable.h"
#include "programdata.h"
directions. Setting the up direction to the opposite of gravity direction is
an easy way to keep the camera upright.
*/
-class Camera: public Placeable
+class Camera: public CameraBackend, public Placeable
{
public:
class Loader: public DataFile::ObjectLoader<Camera>
if(current_state)
throw invalid_operation("Renderer::begin");
+ ++frame_index;
state_stack.emplace_back();
current_state = &state_stack.back();
RendererBackend::begin();
add_shader_data(standard_shdata);
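+	// The frame index presumably lets the backend rotate per-frame resources
+	// such as command buffers.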
+ commands.begin_frame(frame_index);
}
void Renderer::end()
SHADER_DATA = 16
};
+ unsigned frame_index = 0;
unsigned char changed = 0;
std::vector<State> state_stack;
State *current_state = 0;
commands are allowed before the next call to begin(). */
void end();
+ using RendererBackend::begin;
+ using RendererBackend::end;
+
/** Saves the current state so it can be restored later. */
void push_state();
class LoadingThread: public Thread
{
private:
- Semaphore sem;
+ Msp::Semaphore sem;
Mutex queue_mutex;
std::deque<ManagedResource *> async_queue;
std::deque<ManagedResource *> sync_queue;
#include <msp/fs/utils.h>
#include "animation.h"
#include "armature.h"
+#include "backend.h"
#include "basicmaterial.h"
#include "camera.h"
#include "directionallight.h"
{
if(ext==".glsl")
{
- RefPtr<GlslModule> module = new GlslModule;
+ RefPtr<Module> module;
+ if(get_backend_api()==VULKAN)
+ module = new SpirVModule;
+ else
+ module = new GlslModule;
module->load_source(*io, this, name);
return module.release();
}