// source/backends/vulkan/transferqueue.cpp
#include <msp/core/algorithm.h>
#include "device.h"
#include "transferqueue.h"
#include "vulkan.h"

using namespace std;

namespace Msp {
namespace GL {

TransferQueue::TransferQueue(Device &d):
        device(d)
{ }

TransferQueue::PendingTransfer &TransferQueue::prepare_transfer(const void *object, bool ordered, size_t size)
{
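        /* Assign an order number to this transfer.  Unordered transfers of the
        same object share a number, so they can be dispatched in a single
        barrier group; an ordered transfer always gets a number distinct from
        every other transfer of the object, forcing a barrier between them.

        Worked trace for one object (counter starts at 0):
          first unordered:  0 -> 1 -> 2; transfer.order = 2, counter stays 2
          second unordered: 2 -> 2 -> 2; transfer.order = 2 (same group)
          then ordered:     2 -> 2 -> 3; transfer.order = 3, counter becomes 4
          ordered again:    4 -> 4 -> 5; transfer.order = 5, counter becomes 6 */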
        unsigned &order = next_orders[object];
        order += !order;
        order += (order&1)|ordered;

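        /* The transfer list is kept sorted by order number; inserting at
        upper_bound places this transfer after any existing transfers with the
        same number. */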
        auto j = upper_bound_member(transfers, order, &PendingTransfer::order);

        PendingTransfer &transfer = *transfers.emplace(j);
        transfer.order = order;

        if(size)
        {
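                // Find a staging buffer with enough free space, or create a
                // new one large enough for at least this transfer.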
                auto i = find_if(buffers, [size](const StagingBuffer &b){ return b.used+size<=b.size; });
                if(i==buffers.end())
                {
                        buffers.emplace_back(device, max(default_buffer_size, size));
                        i = prev(buffers.end());
                }

                transfer.buffer_index = distance(buffers.begin(), i);
                transfer.offset = i->used;
                transfer.size = size;

                i->used += size;
        }

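        // An ordered transfer consumes its order number, so the next transfer
        // of this object will fall in a later barrier group.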
        order += ordered;

        return transfer;
}

void TransferQueue::dispatch_transfers(VkCommandBuffer command_buffer)
{
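        /* Record the transfers in groups of equal order number.  Each group
        is synchronized as a whole and preceded by a single pipeline barrier
        before its copy commands are recorded. */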
        if(transfers.empty())
                return;

        for(auto i=transfers.begin(); i!=transfers.end(); )
        {
                auto j = i;
                for(; (j!=transfers.end() && j->order==i->order); ++j)
                        j->synchronize();

                device.get_synchronizer().barrier(command_buffer);

                for(; i!=j; ++i)
                {
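                        // A negative buffer_index means the transfer uses no
                        // staging buffer; 0 is passed as a null handle.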
                        VkBuffer buffer = (i->buffer_index>=0 ? buffers[i->buffer_index].buffer : 0);
                        i->transfer(command_buffer, buffer, i->offset);
                }
        }

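        // Reset pending transfers and per-object order counters.  Staging
        // buffer space is not reclaimed here.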
        transfers.clear();
        next_orders.clear();
}


TransferQueue::StagingBuffer::StagingBuffer(Device &d, size_t s):
        device(d),
        size(s)
{
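        // Create a host-visible buffer usable as a copy source and keep it
        // persistently mapped for the lifetime of the staging buffer.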
        const VulkanFunctions &vk = device.get_functions();

        VkBufferCreateInfo buffer_info = { };
        buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_info.size = size;
        buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

        vk.CreateBuffer(buffer_info, buffer);

        MemoryAllocator &allocator = device.get_allocator();
        memory_id = allocator.allocate(buffer, STAGING_MEMORY);
        mapped_address = allocator.map(memory_id, 0, size);
}

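/* The moved-from object is left with null handles so its destructor won't
release the resources a second time. */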
TransferQueue::StagingBuffer::StagingBuffer(StagingBuffer &&other):
        device(other.device),
        buffer(other.buffer),
        memory_id(other.memory_id),
        size(other.size),
        used(other.used),
        mapped_address(other.mapped_address)
{
        other.buffer = 0;
        other.memory_id = 0;
        other.mapped_address = 0;
}

TransferQueue::StagingBuffer::~StagingBuffer()
{
        const VulkanFunctions &vk = device.get_functions();
        MemoryAllocator &allocator = device.get_allocator();

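        // Release only resources this object still owns; a moved-from
        // instance has null handles and skips both branches.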
        if(mapped_address)
        {
                allocator.unmap(mapped_address);
                allocator.release(memory_id);
        }
        if(buffer)
                vk.DestroyBuffer(buffer);
}

} // namespace GL
} // namespace Msp