From: Mikko Rasa
Date: Sat, 11 Dec 2021 23:40:19 +0000 (+0200)
Subject: Refactor TransferQueue to require explicit finalization of transfers
X-Git-Url: http://git.tdb.fi/?a=commitdiff_plain;h=33253bf6d6a330181fda83ba23a6ac0a756d9a8d;p=libs%2Fgl.git

Refactor TransferQueue to require explicit finalization of transfers

This allows the data for the transfer to be written asynchronously.
---

diff --git a/source/backends/vulkan/buffer_backend.cpp b/source/backends/vulkan/buffer_backend.cpp
index 21e1dabe..548f0a51 100644
--- a/source/backends/vulkan/buffer_backend.cpp
+++ b/source/backends/vulkan/buffer_backend.cpp
@@ -52,7 +52,8 @@ void VulkanBuffer::allocate()

 void VulkanBuffer::sub_data(size_t off, size_t sz, const void *d)
 {
-	void *staging = device.get_transfer_queue().prepare_transfer(this, false, sz,
+	TransferQueue &tq = device.get_transfer_queue();
+	void *staging = tq.prepare_transfer(this, false, sz,
 		[this, off, sz](){
 			device.get_synchronizer().write_buffer(handle, off, sz);
 		},
@@ -68,6 +69,7 @@ void VulkanBuffer::sub_data(size_t off, size_t sz, const void *d)

 	const char *src = static_cast<const char *>(d);
 	copy(src, src+sz, static_cast<char *>(staging));
+	tq.finalize_transfer(staging);
 }

 bool VulkanBuffer::can_map() const
diff --git a/source/backends/vulkan/texture1d_backend.cpp b/source/backends/vulkan/texture1d_backend.cpp
index d7416484..539060da 100644
--- a/source/backends/vulkan/texture1d_backend.cpp
+++ b/source/backends/vulkan/texture1d_backend.cpp
@@ -28,8 +28,9 @@ void VulkanTexture1D::sub_image(unsigned level, int x, unsigned wd, const void *
 	unsigned level_size = self.get_level_size(level);
 	bool discard = (x==0 && wd==level_size);

+	TransferQueue &tq = device.get_transfer_queue();
 	size_t data_size = wd*get_pixel_size(storage_fmt);
-	void *staging = device.get_transfer_queue().prepare_transfer(this, false, data_size,
+	void *staging = tq.prepare_transfer(this, false, data_size,
 		[this, level, discard](){
 			change_layout(level, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, discard);
 		},
@@ -48,6 +49,7 @@ void VulkanTexture1D::sub_image(unsigned level, int x, unsigned wd, const void *
 		});

 	stage_pixels(staging, data, wd);
+	tq.finalize_transfer(staging);
 }

 void VulkanTexture1D::fill_mipmap_blit(unsigned level, void *b)
diff --git a/source/backends/vulkan/texture2d_backend.cpp b/source/backends/vulkan/texture2d_backend.cpp
index 3e88898b..7edee3b2 100644
--- a/source/backends/vulkan/texture2d_backend.cpp
+++ b/source/backends/vulkan/texture2d_backend.cpp
@@ -29,8 +29,9 @@ void VulkanTexture2D::sub_image(unsigned level, int x, int y, unsigned wd, unsig
 	auto level_size = self.get_level_size(level);
 	bool discard = (x==0 && y==0 && wd==level_size.x && ht==level_size.y);

+	TransferQueue &tq = device.get_transfer_queue();
 	size_t data_size = wd*ht*get_pixel_size(storage_fmt);
-	void *staging = device.get_transfer_queue().prepare_transfer(this, false, data_size,
+	void *staging = tq.prepare_transfer(this, false, data_size,
 		[this, level, discard](){
 			change_layout(level, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, discard);
 		},
@@ -49,6 +50,7 @@ void VulkanTexture2D::sub_image(unsigned level, int x, int y, unsigned wd, unsig
 		});

 	stage_pixels(staging, data, wd*ht);
+	tq.finalize_transfer(staging);
 }

 void VulkanTexture2D::fill_mipmap_blit(unsigned level, void *b)
diff --git a/source/backends/vulkan/texture3d_backend.cpp b/source/backends/vulkan/texture3d_backend.cpp
index 05ed5182..67adb8d1 100644
--- a/source/backends/vulkan/texture3d_backend.cpp
+++ b/source/backends/vulkan/texture3d_backend.cpp
@@ -34,8 +34,9 @@ void VulkanTexture3D::sub_image(unsigned level, int x, int y, int z, unsigned wd
 	auto level_size = self.get_level_size(level);
 	bool discard = (x==0 && y==0 && z==0 && wd==level_size.x && ht==level_size.y && dp==level_size.z);

+	TransferQueue &tq = device.get_transfer_queue();
 	size_t data_size = wd*ht*dp*get_pixel_size(storage_fmt);
-	void *staging = device.get_transfer_queue().prepare_transfer(this, false, data_size,
+	void *staging = tq.prepare_transfer(this, false, data_size,
 		[this, level, discard](){
 			change_layout(level, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, discard);
 		},
@@ -54,6 +55,7 @@ void VulkanTexture3D::sub_image(unsigned level, int x, int y, int z, unsigned wd
 		});

 	stage_pixels(staging, data, wd*ht*dp);
+	tq.finalize_transfer(staging);
 }

 void VulkanTexture3D::fill_mipmap_blit(unsigned level, void *b)
diff --git a/source/backends/vulkan/texture_backend.cpp b/source/backends/vulkan/texture_backend.cpp
index 237b9845..217299bf 100644
--- a/source/backends/vulkan/texture_backend.cpp
+++ b/source/backends/vulkan/texture_backend.cpp
@@ -178,7 +178,7 @@ void VulkanTexture::generate_mipmap()
 	TransferQueue &tq = device.get_transfer_queue();
 	for(unsigned i=0; i+1
diff --git a/source/backends/vulkan/transferqueue.cpp b/source/backends/vulkan/transferqueue.cpp
--- a/source/backends/vulkan/transferqueue.cpp
+++ b/source/backends/vulkan/transferqueue.cpp
+#include
 #include "device.h"
 #include "transferqueue.h"
 #include "vulkan.h"
@@ -12,36 +13,65 @@ TransferQueue::TransferQueue(Device &d):
 	device(d)
 { }

-TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
+void TransferQueue::allocate_staging(PendingTransfer &transfer, size_t size)
 {
-	unsigned &order = next_orders[object];
-	order += !order;
-	order += (order&1)|ordered;
+	auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
+	if(i==buffers.end())
+	{
+		buffers.emplace_back(device, max(default_buffer_size, size));
+		i = prev(buffers.end());
+	}

-	auto j = upper_bound_member(transfers, order, &PendingTransfer::order);
+	transfer.buffer_index = distance(buffers.begin(), i);
+	transfer.offset = i->used;
+	transfer.size = size;
+	transfer.staging_address = static_cast<char *>(i->mapped_address)+transfer.offset;

-	PendingTransfer &transfer = *transfers.emplace(j);
-	transfer.order = order;
+	i->used += size;
+}
+
+TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
+{
+	PendingTransfer transfer;
+	transfer.object = object;
+	transfer.order = ordered;

 	if(size)
 	{
-		auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
-		if(i==buffers.end())
-		{
-			buffers.emplace_back(device, max(default_buffer_size, size));
-			i = prev(buffers.end());
-		}
+		allocate_staging(transfer, size);
+		auto i = lower_bound_member(async_transfers, transfer.staging_address, &PendingTransfer::staging_address);
+		i = async_transfers.emplace(i, move(transfer));
+		return *i;
+	}
+	else
+		return insert_transfer(move(transfer));
+}
+
+void TransferQueue::finalize_transfer(void *staging)
+{
+	auto i = lower_bound_member(async_transfers, staging, &PendingTransfer::staging_address);
+	if(i==async_transfers.end() || i->staging_address!=staging)
+		throw key_error(staging);

-		transfer.buffer_index = distance(buffers.begin(), i);
-		transfer.offset = i->used;
-		transfer.size = size;
+	insert_transfer(move(*i));
+	async_transfers.erase(i);
+}

-		i->used += size;
-	}
+TransferQueue::PendingTransfer &TransferQueue::insert_transfer(PendingTransfer &&pt)
+{
+	bool ordered = pt.order;
+
+	unsigned &order = next_orders[pt.object];
+	order += !order;
+	order += (order&1)|ordered;
+
+	auto j = upper_bound_member(transfers, order, &PendingTransfer::order);
+	j = transfers.emplace(j, move(pt));
+	j->order = order;

 	order += ordered;

-	return transfer;
+	return *j;
 }

 void TransferQueue::dispatch_transfers(VkCommandBuffer command_buffer)
diff --git a/source/backends/vulkan/transferqueue.h b/source/backends/vulkan/transferqueue.h
index 58b1bc2d..592bb69e 100644
--- a/source/backends/vulkan/transferqueue.h
+++ b/source/backends/vulkan/transferqueue.h
@@ -30,10 +30,12 @@ private:
 	struct PendingTransfer
 	{
+		const void *object = 0;
 		unsigned order = 0;
 		int buffer_index = -1;
 		std::size_t offset = 0;
 		std::size_t size = 0;
+		void *staging_address = 0;
 		std::function<void()> synchronize;
 		std::function transfer;
 	};
@@ -42,6 +44,7 @@ private:
 	std::size_t default_buffer_size = 16*1048576;
 	std::vector<StagingBuffer> buffers;
 	std::vector<PendingTransfer> transfers;
+	std::vector<PendingTransfer> async_transfers;
 	std::map<const void *, unsigned> next_orders;

 public:
@@ -50,8 +53,16 @@ public:
 	template<typename S, typename T>
 	void *prepare_transfer(const void *, bool, std::size_t, S &&, T &&);

+	template<typename S, typename T>
+	void prepare_transfer(const void *o, bool r, S &&s, T &&t)
+	{ prepare_transfer(o, r, 0, std::forward<S>(s), std::forward<T>(t)); }
+
+	void finalize_transfer(void *);
+
 private:
+	void allocate_staging(PendingTransfer &, std::size_t);
 	PendingTransfer &prepare_transfer(const void *, bool, std::size_t);
+	PendingTransfer &insert_transfer(PendingTransfer &&);

 public:
 	void dispatch_transfers(VkCommandBuffer);
@@ -63,7 +74,7 @@ void *TransferQueue::prepare_transfer(const void *object, bool ordered, std::siz
 	PendingTransfer &pt = prepare_transfer(object, ordered, size);
 	pt.synchronize = std::forward<S>(synchronize);
 	pt.transfer = std::forward<T>(transfer);
-	return (pt.buffer_index<0 ? 0 : static_cast<char *>(buffers[pt.buffer_index].mapped_address)+pt.offset);
+	return pt.staging_address;
 }

 } // namespace GL
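
For reference, below is a minimal, self-contained sketch of the prepare/write/finalize
lifecycle this commit introduces: prepare_transfer() hands out a staging pointer, the
caller fills it whenever convenient, and only finalize_transfer() moves the transfer
into the queue that dispatch later consumes. The MiniTransferQueue class, its Pending
struct and dispatch() method are illustrative stand-ins written for this note, not the
API from transferqueue.h; the real TransferQueue additionally tracks per-object
ordering, a synchronize callback, and records into a Vulkan command buffer.

// Illustrative stand-in for the prepare/finalize staging flow; not the real TransferQueue.
#include <cstddef>
#include <cstring>
#include <functional>
#include <iostream>
#include <map>
#include <stdexcept>
#include <utility>
#include <vector>

class MiniTransferQueue
{
private:
	struct Pending
	{
		const void *object = nullptr;
		std::vector<char> storage;           // stands in for a slice of a staging buffer
		std::function<void()> transfer;      // stands in for the recorded copy command
	};

	std::map<void *, Pending> pending;   // prepared but not yet finalized (cf. async_transfers)
	std::vector<Pending> queued;         // finalized, waiting for dispatch

public:
	// Reserve staging space for an object; the transfer is not queued yet.
	void *prepare_transfer(const void *object, std::size_t size, std::function<void()> transfer)
	{
		Pending p;
		p.object = object;
		p.storage.resize(size);
		p.transfer = std::move(transfer);
		void *staging = p.storage.data();
		pending[staging] = std::move(p);   // keyed by staging address
		return staging;
	}

	// The caller signals that the staging data is complete; only now is the transfer queued.
	void finalize_transfer(void *staging)
	{
		auto i = pending.find(staging);
		if(i==pending.end())
			throw std::out_of_range("unknown staging address");
		queued.push_back(std::move(i->second));
		pending.erase(i);
	}

	// Run the queued transfers (stands in for dispatch_transfers()).
	void dispatch()
	{
		for(Pending &p: queued)
			p.transfer();
		queued.clear();
	}
};

int main()
{
	MiniTransferQueue tq;
	int buffer_object = 0;

	const char payload[] = "vertex data";
	void *staging = tq.prepare_transfer(&buffer_object, sizeof(payload),
		[](){ std::cout << "copy staging region to device-local memory\n"; });

	// The data can be written at any point between prepare and finalize,
	// which is what makes asynchronous writes possible.
	std::memcpy(staging, payload, sizeof(payload));
	tq.finalize_transfer(staging);

	tq.dispatch();
	return 0;
}

Keying the not-yet-finalized transfers by their staging address mirrors the
async_transfers list in the patch: the staging pointer returned to the caller is the
only handle needed to complete the transfer later.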