[libs/gl.git] source/backends/vulkan/transferqueue.cpp
Commit: Align staging memory with an alignment suitable for image transfers
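
/*
 * Collects data transfers to GPU resources and dispatches them in batches.
 * Staging memory is suballocated from a set of persistently mapped buffers,
 * which are recycled once their transfers have completed.
 */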
#include <msp/core/algorithm.h>
#include <msp/core/maputils.h>
#include "device.h"
#include "transferqueue.h"
#include "vulkan.h"

using namespace std;

namespace Msp {
namespace GL {

TransferQueue::TransferQueue(Device &d):
	device(d)
{ }

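/*
 * Reserves staging memory for a transfer from the first buffer with enough
 * free space, creating a new staging buffer if none has room.  The buffer
 * index, offset, size and mapped address are recorded in the transfer.
 */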
void TransferQueue::allocate_staging(PendingTransfer &transfer, size_t size)
{
	auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
	if(i==buffers.end())
	{
		buffers.emplace_back(device, max(default_buffer_size, size));
		i = prev(buffers.end());
	}

	transfer.buffer_index = distance(buffers.begin(), i);
	transfer.offset = i->used;
	transfer.size = size;
	transfer.staging_address = static_cast<char *>(i->mapped_address)+transfer.offset;

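	/* Round the consumed byte count up to a multiple of 48 so the next
	transfer's offset stays suitably aligned for buffer-to-image copies:
	48 is divisible by every common texel block size (1, 2, 3, 4, 6, 8,
	12 and 16 bytes). */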
	i->used += size+47;
	i->used -= i->used%48;
	++i->async_count;
}

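/*
 * Creates a pending transfer for an object and allocates staging memory for
 * it if a size is given.  Transfers with staging memory are parked in a
 * separate list, sorted by staging address, until finalize_transfer moves
 * them into the main queue; zero-size transfers are queued immediately.
 */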
TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
{
	PendingTransfer transfer;
	transfer.object = object;
	transfer.order = ordered;

	if(size)
	{
		allocate_staging(transfer, size);
		auto i = lower_bound_member(async_transfers, transfer.staging_address, &PendingTransfer::staging_address);
		i = async_transfers.emplace(i, move(transfer));
		return *i;
	}
	else
		return insert_transfer(move(transfer));
}

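/*
 * Moves an asynchronously prepared transfer into the main queue.  The
 * transfer is identified by its staging address, as returned to the caller
 * by prepare_transfer.
 */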
void TransferQueue::finalize_transfer(void *staging)
{
	auto i = lower_bound_member(async_transfers, staging, &PendingTransfer::staging_address);
	if(i==async_transfers.end() || i->staging_address!=staging)
		throw key_error(staging);

	if(i->buffer_index>=0)
		--buffers[i->buffer_index].async_count;
	insert_transfer(move(*i));
	async_transfers.erase(i);
}

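/*
 * Inserts a transfer into the queue and assigns it an order number, which
 * dispatch_transfers uses to batch transfers between pipeline barriers.
 * Unordered transfers of the same object share an even order value; an
 * ordered transfer takes the next odd value for itself and advances the
 * object's counter past it, separating it from earlier and later transfers.
 */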
TransferQueue::PendingTransfer &TransferQueue::insert_transfer(PendingTransfer &&pt)
{
	bool ordered = pt.order;

	unsigned &order = next_orders[pt.object];
	order += !order;
	order += (order&1)|ordered;

	auto j = upper_bound_member(transfers, order, &PendingTransfer::order);
	j = transfers.emplace(j, move(pt));
	j->order = order;

	order += ordered;

	return *j;
}

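/*
 * Records all pending transfers into the command buffer.  Transfers are
 * processed one order value at a time: the entire batch is synchronized
 * first, then a single pipeline barrier is issued, then the transfers
 * themselves are recorded.  Staging buffers are recycled once they have no
 * asynchronous transfers pending and haven't been touched for a full frame
 * cycle.
 */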
void TransferQueue::dispatch_transfers(VkCommandBuffer command_buffer)
{
	if(transfers.empty())
		return;

	++current_frame;
	for(auto i=transfers.begin(); i!=transfers.end(); )
	{
		auto j = i;
		for(; (j!=transfers.end() && j->order==i->order); ++j)
			j->synchronize();

		device.get_synchronizer().barrier(command_buffer);

		for(; i!=j; ++i)
		{
			VkBuffer buffer = (i->buffer_index>=0 ? buffers[i->buffer_index].buffer : 0);
			i->transfer(command_buffer, buffer, i->offset);
			if(i->buffer_index>=0)
				buffers[i->buffer_index].last_frame = current_frame;
		}
	}

	transfers.clear();
	next_orders.clear();

	unsigned n_frames_in_flight = device.get_n_frames_in_flight();
	for(StagingBuffer &b: buffers)
		if(!b.async_count && b.last_frame+n_frames_in_flight<current_frame)
			b.used = 0;
}
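
/* Hypothetical usage, inferred from the interface above; the actual callers
(presumably the buffer and texture backends) also install the synchronize
and transfer callbacks invoked in dispatch_transfers:

     PendingTransfer &pt = queue.prepare_transfer(&buffer, true, data_size);
     memcpy(pt.staging_address, data, data_size);   // fill the staging area
     queue.finalize_transfer(pt.staging_address);   // move into the main queue
     queue.dispatch_transfers(frame_cmd_buffer);    // record once per frame
*/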


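/*
 * Creates a buffer to be used as a transfer source, backed by host-visible
 * staging memory which stays persistently mapped.
 */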
TransferQueue::StagingBuffer::StagingBuffer(Device &d, size_t s):
	device(d),
	size(s)
{
	const VulkanFunctions &vk = device.get_functions();

	VkBufferCreateInfo buffer_info = { };
	buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buffer_info.size = size;
	buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

	vk.CreateBuffer(buffer_info, buffer);

	MemoryAllocator &allocator = device.get_allocator();
	memory_id = allocator.allocate(buffer, STAGING_MEMORY);
	mapped_address = allocator.map(memory_id);
}

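/* Move constructor: clears the source's handles so its destructor won't
release resources it no longer owns. */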
TransferQueue::StagingBuffer::StagingBuffer(StagingBuffer &&other):
	device(other.device),
	buffer(other.buffer),
	memory_id(other.memory_id),
	size(other.size),
	used(other.used),
	mapped_address(other.mapped_address),
	async_count(other.async_count),
	last_frame(other.last_frame)
{
	other.buffer = 0;
	other.memory_id = 0;
	other.mapped_address = 0;
}

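/* The null checks skip moved-from instances, which no longer own their
resources. */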
TransferQueue::StagingBuffer::~StagingBuffer()
{
	const VulkanFunctions &vk = device.get_functions();
	MemoryAllocator &allocator = device.get_allocator();

	if(mapped_address)
	{
		allocator.unmap(memory_id);
		allocator.release(memory_id);
	}
	if(buffer)
		vk.DestroyBuffer(buffer);
}

} // namespace GL
} // namespace Msp