#include <msp/core/algorithm.h>
#include <msp/core/maputils.h>
#include "device.h"
#include "transferqueue.h"
#include "vulkan.h"

using namespace std;

namespace Msp {
namespace GL {

TransferQueue::TransferQueue(Device &d):
	device(d)
{ }

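/* Allocates staging space for a transfer, creating a new staging buffer if
none of the existing ones has enough room.  The buffer's async count is
raised so it won't be recycled while the transfer is pending. */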
void TransferQueue::allocate_staging(PendingTransfer &transfer, size_t size)
{
	auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
	if(i==buffers.end())
	{
		buffers.emplace_back(device, max(default_buffer_size, size));
		i = prev(buffers.end());
	}

	transfer.buffer_index = distance(buffers.begin(), i);
	transfer.offset = i->used;
	transfer.size = size;
	transfer.staging_address = static_cast<char *>(i->mapped_address)+transfer.offset;

	i->used += size;
	++i->async_count;
}

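/* Prepares a transfer for an object.  If staging space is requested, the
transfer is parked in async_transfers, sorted by staging address, until
finalize_transfer moves it to the dispatch queue; otherwise it's queued
for dispatch immediately. */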
TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
{
	PendingTransfer transfer;
	transfer.object = object;
	transfer.order = ordered;

	if(size)
	{
		allocate_staging(transfer, size);
		auto i = lower_bound_member(async_transfers, transfer.staging_address, &PendingTransfer::staging_address);
		i = async_transfers.emplace(i, move(transfer));
		return *i;
	}
	else
		return insert_transfer(move(transfer));
}

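/* Queues a previously prepared transfer for dispatch after its staging
data has been filled in.  The address must be one handed out by
prepare_transfer, or key_error is thrown. */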
void TransferQueue::finalize_transfer(void *staging)
{
	auto i = lower_bound_member(async_transfers, staging, &PendingTransfer::staging_address);
	if(i==async_transfers.end() || i->staging_address!=staging)
		throw key_error(staging);

	if(i->buffer_index>=0)
		--buffers[i->buffer_index].async_count;
	insert_transfer(move(*i));
	async_transfers.erase(i);
}

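/* Places a transfer in the dispatch queue, sorted by a per-object sequence
key.  Consecutive unordered transfers for an object share a key and can be
batched behind a single barrier; ordered transfers always execute in
submission order relative to other transfers for the same object. */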
TransferQueue::PendingTransfer &TransferQueue::insert_transfer(PendingTransfer &&pt)
{
	// The order field initially carries the ordered flag; it will be
	// replaced by the actual sequence key below.
	bool ordered = pt.order;

	unsigned &order = next_orders[pt.object];
	// Start a new object's key counter at 1.
	order += !order;
	// Ordered transfers and odd counter values advance to the next key;
	// consecutive unordered transfers reuse the current even key.
	order += (order&1)|ordered;

	auto j = upper_bound_member(transfers, order, &PendingTransfer::order);
	j = transfers.emplace(j, move(pt));
	j->order = order;

	// Advance the counter past an ordered transfer's key so no later
	// transfer is batched together with it.
	order += ordered;

	return *j;
}

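/* Records all queued transfers into a command buffer and recycles staging
buffers which are no longer in use. */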
void TransferQueue::dispatch_transfers(VkCommandBuffer command_buffer)
{
	if(transfers.empty())
		return;

	++current_frame;
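	// Process the queue one batch of equal-key transfers at a time:
	// synchronize the whole batch, emit a single barrier, then record
	// the transfer commands.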
	for(auto i=transfers.begin(); i!=transfers.end(); )
	{
		auto j = i;
		for(; (j!=transfers.end() && j->order==i->order); ++j)
			j->synchronize();

		device.get_synchronizer().barrier(command_buffer);

		for(; i!=j; ++i)
		{
			VkBuffer buffer = (i->buffer_index>=0 ? buffers[i->buffer_index].buffer : 0);
			i->transfer(command_buffer, buffer, i->offset);
			if(i->buffer_index>=0)
				buffers[i->buffer_index].last_frame = current_frame;
		}
	}

	transfers.clear();
	next_orders.clear();

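	// Reclaim staging buffers with no pending async transfers which
	// haven't been used for a full frame cycle.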
	unsigned n_frames_in_flight = device.get_n_frames_in_flight();
	for(StagingBuffer &b: buffers)
		if(!b.async_count && b.last_frame+n_frames_in_flight<current_frame)
			b.used = 0;
}


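/* Creates a staging buffer and keeps it persistently mapped so transfer
data can be written directly into it. */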
TransferQueue::StagingBuffer::StagingBuffer(Device &d, size_t s):
	device(d),
	size(s)
{
	const VulkanFunctions &vk = device.get_functions();

	VkBufferCreateInfo buffer_info = { };
	buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buffer_info.size = size;
	buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

	vk.CreateBuffer(buffer_info, buffer);

	MemoryAllocator &allocator = device.get_allocator();
	memory_id = allocator.allocate(buffer, STAGING_MEMORY);
	mapped_address = allocator.map(memory_id, 0, size);
}

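/* Takes ownership of another staging buffer's Vulkan handles, clearing
them in the source so its destructor won't free them. */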
TransferQueue::StagingBuffer::StagingBuffer(StagingBuffer &&other):
	device(other.device),
	buffer(other.buffer),
	memory_id(other.memory_id),
	size(other.size),
	used(other.used),
	mapped_address(other.mapped_address)
{
	other.buffer = 0;
	other.memory_id = 0;
	other.mapped_address = 0;
}

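/* Unmaps and releases the buffer's memory and destroys the buffer, unless
ownership was moved away. */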
TransferQueue::StagingBuffer::~StagingBuffer()
{
	const VulkanFunctions &vk = device.get_functions();
	MemoryAllocator &allocator = device.get_allocator();

	if(mapped_address)
	{
		allocator.unmap(mapped_address);
		allocator.release(memory_id);
	}
	if(buffer)
		vk.DestroyBuffer(buffer);
}

} // namespace GL
} // namespace Msp