1 #include <msp/core/algorithm.h>
2 #include <msp/core/maputils.h>
4 #include "transferqueue.h"
// Constructor: binds this queue to the owning Device.
// NOTE(review): only the first line of the constructor is present in this
// listing (the member-init list and body are missing) — consult the full
// source before editing.
12 TransferQueue::TransferQueue(Device &d):
// Reserves `size` bytes of CPU-visible staging memory for `transfer` and
// records where that space lives (buffer index, offset, mapped pointer).
// NOTE(review): the inner line numbering jumps, so braces and some
// statements are missing from this view; comments describe only the
// visible lines.
16 void TransferQueue::allocate_staging(PendingTransfer &transfer, size_t size)
// First-fit scan: use the first existing staging buffer with enough free
// space after its current fill level.
18 auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
// (Presumably inside an `if(i==buffers.end())` branch — not visible here.)
// No buffer fits: allocate a fresh one, at least default_buffer_size so
// subsequent small transfers can share it.
21 buffers.emplace_back(device, max(default_buffer_size, size));
22 i = prev(buffers.end());
// Record the allocation: which pool buffer, and the byte offset within it.
25 transfer.buffer_index = distance(buffers.begin(), i);
26 transfer.offset = i->used;
// Pointer the caller writes the payload through (persistently mapped).
28 transfer.staging_address = static_cast<char *>(i->mapped_address)+transfer.offset;
// NOTE(review): the statement that advances `used` by `size` is missing
// from this listing; this line then appears to round the fill level, but
// 48 is an unusual alignment granularity and `-=` truncates downward —
// verify against the complete source before touching it.
31 i->used -= i->used%48;
// Creates a PendingTransfer for `object`, allocates staging space for it,
// and registers it either as an asynchronous (not-yet-finalized) transfer
// or directly in the ready queue. Returns a reference to the stored entry.
// NOTE(review): lines are missing between the two tail paths below — they
// are almost certainly alternative branches of a conditional that is not
// visible in this listing.
35 TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
37 PendingTransfer transfer;
38 transfer.object = object;
// `order` starts as just the ordered flag; insert_transfer refines it.
39 transfer.order = ordered;
// (Presumably guarded by `if(size)` or similar — confirm in full source.)
43 allocate_staging(transfer, size);
// Keep async_transfers sorted by staging address so finalize_transfer can
// locate an entry with a binary search.
44 auto i = lower_bound_member(async_transfers, transfer.staging_address, &PendingTransfer::staging_address);
45 i = async_transfers.emplace(i, move(transfer));
// Alternative path (branch structure missing): hand the transfer straight
// to the ready queue.
49 return insert_transfer(move(transfer));
// Moves a previously prepared asynchronous transfer, identified by its
// staging pointer, into the ready queue. Throws key_error if `staging`
// does not correspond to any pending async transfer.
52 void TransferQueue::finalize_transfer(void *staging)
// Binary search: async_transfers is kept sorted by staging_address.
54 auto i = lower_bound_member(async_transfers, staging, &PendingTransfer::staging_address);
55 if(i==async_transfers.end() || i->staging_address!=staging)
56 throw key_error(staging);
// buffer_index < 0 appears to mean "no staging buffer"; only real buffers
// carry an async refcount.
58 if(i->buffer_index>=0)
59 --buffers[i->buffer_index].async_count;
// Hand off to the ready queue, then drop the async bookkeeping entry.
60 insert_transfer(move(*i));
61 async_transfers.erase(i);
// Places `pt` into the dispatch queue, assigning it an order key derived
// from the per-object counter in next_orders, and returns a reference to
// the stored element.
// NOTE(review): lines are missing around the order arithmetic; the scheme
// below is inferred only from what is visible — confirm in the full source.
64 TransferQueue::PendingTransfer &TransferQueue::insert_transfer(PendingTransfer &&pt)
66 bool ordered = pt.order;
// Per-object monotonic order counter (default-constructed to 0 on first use).
68 unsigned &order = next_orders[pt.object];
// Looks like the low bit distinguishes ordered transfers: an ordered
// transfer (or one following an ordered slot) bumps the counter so it
// lands in a later dispatch batch — TODO confirm against full source.
70 order += (order&1)|ordered;
// Keep `transfers` sorted by order key; equal keys stay batched together.
72 auto j = upper_bound_member(transfers, order, &PendingTransfer::order);
73 j = transfers.emplace(j, move(pt));
// Records all queued transfers into the given command buffer, batch by
// batch (one batch = consecutive entries with equal order keys), emitting
// a synchronization barrier before each batch. Afterwards, staging
// buffers that have been idle for a full frame cycle are retired.
// NOTE(review): loop braces and several statements are missing from this
// listing; comments cover only the visible lines.
81 void TransferQueue::dispatch_transfers(const VulkanCommandRecorder &vkCmd)
87 for(auto i=transfers.begin(); i!=transfers.end(); )
// Advance j to the end of the current same-order batch.
90 for(; (j!=transfers.end() && j->order==i->order); ++j)
// Barrier between batches so earlier writes are visible to later ones.
93 device.get_synchronizer().barrier(vkCmd);
// buffer_index < 0 means the transfer has no staging buffer (pass a null
// VkBuffer handle in that case).
97 VkBuffer buffer = (i->buffer_index>=0 ? buffers[i->buffer_index].buffer : 0);
// The stored callback records the actual copy command.
98 i->transfer(vkCmd, buffer, i->offset);
99 if(i->buffer_index>=0)
// Remember the last frame each buffer was used in, for retirement below.
100 buffers[i->buffer_index].last_frame = current_frame;
// Retire staging buffers with no pending async transfers that have not
// been touched for more than n_frames_in_flight frames (the GPU can no
// longer be reading them). The erase/cleanup statement itself is missing
// from this listing.
107 unsigned n_frames_in_flight = device.get_n_frames_in_flight();
108 for(StagingBuffer &b: buffers)
109 if(!b.async_count && b.last_frame+n_frames_in_flight<current_frame)
// Creates a host-visible Vulkan staging buffer of `s` bytes, allocates
// STAGING_MEMORY for it and maps it persistently into CPU address space.
// NOTE(review): the member-init list continuation and body braces are
// missing from this listing.
114 TransferQueue::StagingBuffer::StagingBuffer(Device &d, size_t s):
118 const VulkanFunctions &vk = device.get_functions();
// Plain transfer-source buffer, used only by one queue family.
120 VkBufferCreateInfo buffer_info = { };
121 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
122 buffer_info.size = size;
123 buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
124 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
// Project wrapper around vkCreateBuffer (fills the handle out-param).
126 vk.CreateBuffer(buffer_info, buffer);
// Back the buffer with staging (host-visible) memory and keep it mapped
// for the lifetime of this StagingBuffer.
128 MemoryAllocator &allocator = device.get_allocator();
129 memory_id = allocator.allocate(buffer, STAGING_MEMORY);
130 mapped_address = allocator.map(memory_id);
// Move constructor: steals the Vulkan handle, memory id and mapping from
// `other` so only one StagingBuffer ever owns (and later destroys) them.
// NOTE(review): some init-list entries and the statements that null out
// other.buffer / other.memory_id are missing from this listing; only the
// mapped_address reset is visible.
133 TransferQueue::StagingBuffer::StagingBuffer(StagingBuffer &&other):
134 device(other.device),
135 buffer(other.buffer),
136 memory_id(other.memory_id),
139 mapped_address(other.mapped_address)
// Leave the moved-from object without a mapping so its destructor skips
// the unmap/release path.
143 other.mapped_address = 0;
// Destructor: unmaps and releases the backing memory, then destroys the
// Vulkan buffer handle.
// NOTE(review): the guard conditions (likely `if(memory_id)` /
// `if(buffer)` protecting against a moved-from object) fall in lines
// missing from this listing — confirm in the full source.
146 TransferQueue::StagingBuffer::~StagingBuffer()
148 const VulkanFunctions &vk = device.get_functions();
149 MemoryAllocator &allocator = device.get_allocator();
// Undo the persistent mapping before freeing the allocation.
153 allocator.unmap(memory_id);
154 allocator.release(memory_id);
// Project wrapper around vkDestroyBuffer.
157 vk.DestroyBuffer(buffer);