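/* Synchronizer tracks pending buffer writes and image layout transitions and
turns them into Vulkan pipeline barriers.  In image accesses, a level of -1
tracks the entire image as a single entry, while -2 appears to act as a
sentinel entry preceding the per-level entries of an image. */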
#include <msp/core/algorithm.h>
#include "buffer.h"
#include "device.h"
#include "error.h"
#include "texture.h"
#include "synchronizer.h"
#include "vulkan.h"

using namespace std;

namespace Msp {
namespace GL {

Synchronizer::Synchronizer(Device &d):
	device(d)
{ }

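/* Records a write to a range of a buffer.  Writes to the same buffer are
merged into a single range covering them all.  A mapped write has already
happened on the host side, so it is marked as performed as well as pending. */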
void Synchronizer::write_buffer(VkBuffer buffer, size_t offset, size_t size, bool mapped)
{
	auto i = lower_bound_member(buffer_accesses, buffer, &BufferAccess::buffer);
	if(i==buffer_accesses.end() || i->buffer!=buffer)
	{
		i = buffer_accesses.emplace(i);
		i->buffer = buffer;
		i->offset = offset;
		i->size = size;
	}
	else
	{
		size_t begin = min(offset, i->offset);
		size_t end = max(offset+size, i->offset+i->size);
		i->offset = begin;
		i->size = end-begin;
	}

	if(mapped)
		i->was_written = true;
	i->pending_write = true;
}

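/* Ensures that the given image is tracked with a separate entry for each
mipmap level, so the levels' layouts can be changed independently. */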
void Synchronizer::split_image_mipmap(VkImage image, unsigned aspect, unsigned n_levels)
{
	if(!n_levels)
		throw invalid_argument("Synchronizer::split_image_mipmap");

	auto i = lower_bound_member(image_accesses, image, &ImageAccess::image);
	if(i!=image_accesses.end() && i->image==image && i->level>=0)
		return;

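	/* The image is tracked as a whole; duplicate the entry so that each
	mip level gets its own copy. */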
	if(i!=image_accesses.end() && i->image==image && i->level==-1)
	{
		i = image_accesses.insert(i, n_levels-1, *i);
		for(unsigned j=0; j<n_levels; ++i, ++j)
			i->level = j;
	}
	else
	{
		ImageAccess access;
		access.image = image;
		access.aspect = aspect;
		access.current_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
		access.pending_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

		// Remove a sentinel entry, guarding against the end iterator
		// and entries belonging to other images
		if(i!=image_accesses.end() && i->image==image && i->level==-2)
			i = image_accesses.erase(i);

		for(unsigned j=0; j<n_levels; ++i, ++j)
			if(i==image_accesses.end() || i->image!=image || i->level>static_cast<int>(j))
			{
				i = image_accesses.insert(i, access);
				i->level = j;
			}
	}
}

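/* Records a layout change for an image, either as a whole (negative level) or
for a single mip level.  Whole-image and per-level changes can't be mixed on
the same image.  If discard is set, the current contents of the image are not
preserved. */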
void Synchronizer::change_image_layout(VkImage image, unsigned aspect, int level, unsigned layout, bool discard)
{
	auto i = lower_bound_member(image_accesses, image, &ImageAccess::image);

	if(level>=0)
	{
		if(i==image_accesses.end() || i->image!=image)
		{
			i = image_accesses.emplace(i);
			i->image = image;
			i->level = -2;
			++i;
		}
		else if(i->level==-1)
			throw invalid_operation("Synchronizer::change_image_layout");
		else
		{
			for(; (i!=image_accesses.end() && i->image==image && i->level<level); ++i) ;
		}
	}
	else if(i!=image_accesses.end() && i->image==image && i->level==-2)
		throw invalid_operation("Synchronizer::change_image_layout");

	if(i==image_accesses.end() || i->image!=image || (level>=0 && i->level!=level))
	{
		i = image_accesses.emplace(i);
		i->image = image;
		i->aspect = aspect;
		i->level = (level<0 ? -1 : level);
		i->current_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
	}

	for(; (i!=image_accesses.end() && i->image==image && (level<0 || i->level==level)); ++i)
	{
		if(discard)
			i->current_layout = VK_IMAGE_LAYOUT_UNDEFINED;
		i->pending_layout = layout;
	}
}

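/* Clears pending buffer writes and returns pending image layouts to the
default, without emitting any barriers. */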
void Synchronizer::reset()
{
	for(BufferAccess &b: buffer_accesses)
		b.pending_write = false;
	for(ImageAccess &i: image_accesses)
		i.pending_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}

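/* Records a pipeline barrier covering all pending accesses: buffers whose
access switches between reading and writing, and images whose layout changes.
Accesses which end up back in the default state are then forgotten. */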
void Synchronizer::barrier(VkCommandBuffer command_buffer)
{
	const VulkanFunctions &vk = device.get_functions();

	if(buffer_accesses.empty() && image_accesses.empty())
		return;

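	/* Source and destination stage masks are accumulated across all of the
	individual barriers, because vkCmdPipelineBarrier only takes one mask of
	each kind. */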
	VkPipelineStageFlags src_stage = 0;
	VkPipelineStageFlags dst_stage = 0;

	static constexpr VkPipelineStageFlags buffer_read_stages = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT|
		VK_PIPELINE_STAGE_VERTEX_SHADER_BIT|VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
	static constexpr VkPipelineStageFlags buffer_write_stages = VK_PIPELINE_STAGE_TRANSFER_BIT|
		VK_PIPELINE_STAGE_HOST_BIT;

	vector<VkBufferMemoryBarrier> buffer_barriers;
	buffer_barriers.reserve(buffer_accesses.size());
	for(BufferAccess &b: buffer_accesses)
	{
		if(b.pending_write==b.was_written)
			continue;

		buffer_barriers.emplace_back(VkBufferMemoryBarrier{ });
		VkBufferMemoryBarrier &barrier = buffer_barriers.back();

		barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
		barrier.srcAccessMask = (b.was_written ? VK_ACCESS_MEMORY_WRITE_BIT : 0);
		barrier.dstAccessMask = (b.pending_write ? VK_ACCESS_MEMORY_WRITE_BIT : VK_ACCESS_MEMORY_READ_BIT);
		barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.buffer = handle_cast<::VkBuffer>(b.buffer);
		barrier.offset = b.offset;
		barrier.size = b.size;

		src_stage |= (b.was_written ? buffer_write_stages : buffer_read_stages);
		dst_stage |= (b.pending_write ? buffer_write_stages : buffer_read_stages);
	}

	static constexpr VkPipelineStageFlags image_read_stages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT|VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT|
		VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT|VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
	static constexpr VkPipelineStageFlags image_write_stages = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT|
		VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT|VK_PIPELINE_STAGE_TRANSFER_BIT;

	vector<VkImageMemoryBarrier> image_barriers;
	image_barriers.reserve(image_accesses.size());
	for(const ImageAccess &i: image_accesses)
	{
		if(i.level==-2 || i.pending_layout==i.current_layout)
			continue;

		image_barriers.emplace_back(VkImageMemoryBarrier{ });
		VkImageMemoryBarrier &barrier = image_barriers.back();

		barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
		barrier.srcAccessMask = (is_write_layout(i.current_layout) ? VK_ACCESS_MEMORY_WRITE_BIT : 0);
		barrier.dstAccessMask = (is_write_layout(i.pending_layout) ? VK_ACCESS_MEMORY_WRITE_BIT : VK_ACCESS_MEMORY_READ_BIT);
		barrier.oldLayout = static_cast<VkImageLayout>(i.current_layout);
		barrier.newLayout = static_cast<VkImageLayout>(i.pending_layout);
		barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.image = handle_cast<::VkImage>(i.image);
		barrier.subresourceRange.aspectMask = i.aspect;
		barrier.subresourceRange.baseMipLevel = max(i.level, 0);
		barrier.subresourceRange.levelCount = (i.level<0 ? VK_REMAINING_MIP_LEVELS : 1);
		barrier.subresourceRange.baseArrayLayer = 0;
		barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;

		if(i.current_layout!=VK_IMAGE_LAYOUT_UNDEFINED)
			src_stage |= (is_write_layout(i.current_layout) ? image_write_stages : image_read_stages);
		dst_stage |= (is_write_layout(i.pending_layout) ? image_write_stages : image_read_stages);
	}

	if(buffer_barriers.empty() && image_barriers.empty())
		return;

	if(!src_stage)
		src_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
	if(!dst_stage)
		dst_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;

	vk.CmdPipelineBarrier(command_buffer, src_stage, dst_stage, 0, 0, 0,
		buffer_barriers.size(), buffer_barriers.data(), image_barriers.size(), image_barriers.data());

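	/* Forget accesses which are back in the default state and update the
	remaining ones to reflect the state after the barrier. */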
	for(auto i=buffer_accesses.begin(); i!=buffer_accesses.end(); )
	{
		if(!i->pending_write)
			i = buffer_accesses.erase(i);
		else
		{
			i->was_written = i->pending_write;
			++i;
		}
	}

	for(auto i=image_accesses.begin(); i!=image_accesses.end(); )
	{
		if(i->level!=-1)
		{
			auto j = i;
			if(j->level==-2)
				++j;

			bool remove_image = true;
			for(; (j!=image_accesses.end() && j->image==i->image); ++j)
				remove_image &= (j->pending_layout==VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

			if(remove_image)
				i = image_accesses.erase(i, j);
			else
			{
				for(; i!=j; ++i)
					i->current_layout = i->pending_layout;
			}
		}
		else if(i->pending_layout==VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
			i = image_accesses.erase(i);
		else
		{
			i->current_layout = i->pending_layout;
			++i;
		}
	}
}

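/* Returns whether an image layout implies write access. */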
bool Synchronizer::is_write_layout(unsigned layout)
{
	return layout==VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL || layout==VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
		layout==VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
}

} // namespace GL
} // namespace Msp