Refactor TransferQueue to require explicit finalization of transfers
[libs/gl.git] / source/backends/vulkan/transferqueue.cpp
#include <msp/core/algorithm.h>
#include <msp/core/maputils.h>
#include "device.h"
#include "transferqueue.h"
#include "vulkan.h"

using namespace std;

namespace Msp {
namespace GL {

TransferQueue::TransferQueue(Device &d):
	device(d)
{ }

// Finds room for the transfer's data in an existing staging buffer, or
// creates a new one if nothing has enough free space, and records where
// the data will be written.
void TransferQueue::allocate_staging(PendingTransfer &transfer, size_t size)
{
	auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
	if(i==buffers.end())
	{
		buffers.emplace_back(device, max(default_buffer_size, size));
		i = prev(buffers.end());
	}

	transfer.buffer_index = distance(buffers.begin(), i);
	transfer.offset = i->used;
	transfer.size = size;
	transfer.staging_address = static_cast<char *>(i->mapped_address)+transfer.offset;

	i->used += size;
}

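/* Allocation behaviour illustrated (not part of the original source): a
   request that still fits in the unused tail of an existing staging buffer
   reuses it (first fit); otherwise a new buffer of max(default_buffer_size,
   size) is created, so a single transfer larger than the default gets a
   buffer of exactly its own size. */
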
// Sets up a transfer for an object.  If it has data to upload, staging
// memory is allocated and the transfer is parked in async_transfers, sorted
// by staging address, until finalize_transfer() is called.  Transfers with
// no data go directly onto the queue.
TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
{
	PendingTransfer transfer;
	transfer.object = object;
	transfer.order = ordered;

	if(size)
	{
		allocate_staging(transfer, size);
		auto i = lower_bound_member(async_transfers, transfer.staging_address, &PendingTransfer::staging_address);
		i = async_transfers.emplace(i, move(transfer));
		return *i;
	}
	else
		return insert_transfer(move(transfer));
}

// Moves a previously prepared transfer, identified by its staging memory
// address, onto the ordered queue.
void TransferQueue::finalize_transfer(void *staging)
{
	auto i = lower_bound_member(async_transfers, staging, &PendingTransfer::staging_address);
	if(i==async_transfers.end() || i->staging_address!=staging)
		throw key_error(staging);

	insert_transfer(move(*i));
	async_transfers.erase(i);
}

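/* Illustrative caller-side sketch (not part of this file): the explicit
   finalization flow this change asks for, based only on the functions above.
   `queue`, `buffer_obj` and `data` are hypothetical names, the sketch assumes
   the caller can reach PendingTransfer's members, and real callers presumably
   also set up the synchronize/transfer callbacks that dispatch_transfers()
   later invokes.

       TransferQueue::PendingTransfer &pt = queue.prepare_transfer(&buffer_obj, true, data.size());
       memcpy(pt.staging_address, data.data(), data.size());  // fill the staging memory
       queue.finalize_transfer(pt.staging_address);           // move it onto the ordered queue
*/
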
TransferQueue::PendingTransfer &TransferQueue::insert_transfer(PendingTransfer &&pt)
{
	bool ordered = pt.order;

	/* Each object gets a running order number.  Unordered transfers for the
	same object share a number and can be grouped together; an ordered
	transfer gets a number of its own and pushes later transfers for the
	object into a later group. */
	unsigned &order = next_orders[pt.object];
	order += !order;
	order += (order&1)|ordered;

	auto j = upper_bound_member(transfers, order, &PendingTransfer::order);
	j = transfers.emplace(j, move(pt));
	j->order = order;

	order += ordered;

	return *j;
}

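/* Worked example of the order arithmetic above (illustrative): starting from
   next_orders[object]==0,
     unordered transfer:  0 -> 1 -> 2, inserted with order 2, counter stays 2
     another unordered:   2 -> 2,      inserted with order 2 (same group)
     ordered transfer:    2 -> 3,      inserted with order 3, counter becomes 4
     next transfer:       4 -> 4,      inserted with order 4 (a later group)
   Unordered transfers to an object thus share an even group that can be
   recorded together, while each ordered transfer gets a group of its own and
   pushes everything after it into a later one. */
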
void TransferQueue::dispatch_transfers(VkCommandBuffer command_buffer)
{
	if(transfers.empty())
		return;

	/* Process the transfers one order group at a time: first let every
	transfer in the group declare its synchronization needs, then emit a
	single barrier, then record the actual transfer commands. */
	for(auto i=transfers.begin(); i!=transfers.end(); )
	{
		auto j = i;
		for(; (j!=transfers.end() && j->order==i->order); ++j)
			j->synchronize();

		device.get_synchronizer().barrier(command_buffer);

		for(; i!=j; ++i)
		{
			VkBuffer buffer = (i->buffer_index>=0 ? buffers[i->buffer_index].buffer : 0);
			i->transfer(command_buffer, buffer, i->offset);
		}
	}

	transfers.clear();
	next_orders.clear();
}


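/* Usage note (an assumption, not stated in this file): dispatch_transfers()
   is presumably called once per command buffer before it is submitted.  After
   it runs, the pending transfers and per-object order counters are cleared,
   but the staging buffers themselves are not reclaimed here. */
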
TransferQueue::StagingBuffer::StagingBuffer(Device &d, size_t s):
	device(d),
	size(s)
{
	const VulkanFunctions &vk = device.get_functions();

	VkBufferCreateInfo buffer_info = { };
	buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buffer_info.size = size;
	buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

	vk.CreateBuffer(buffer_info, buffer);

	MemoryAllocator &allocator = device.get_allocator();
	memory_id = allocator.allocate(buffer, STAGING_MEMORY);
	mapped_address = allocator.map(memory_id, 0, size);
}

TransferQueue::StagingBuffer::StagingBuffer(StagingBuffer &&other):
	device(other.device),
	buffer(other.buffer),
	memory_id(other.memory_id),
	size(other.size),
	used(other.used),
	mapped_address(other.mapped_address)
{
	other.buffer = 0;
	other.memory_id = 0;
	other.mapped_address = 0;
}

TransferQueue::StagingBuffer::~StagingBuffer()
{
	const VulkanFunctions &vk = device.get_functions();
	MemoryAllocator &allocator = device.get_allocator();

	if(mapped_address)
	{
		allocator.unmap(mapped_address);
		allocator.release(memory_id);
	}
	if(buffer)
		vk.DestroyBuffer(buffer);
}

} // namespace GL
} // namespace Msp