+#include <msp/core/algorithm.h>
#include <msp/core/hash.h>
#include "batch.h"
#include "blend.h"
if(changes&PipelineState::VERTEX_SETUP)
self.vertex_setup->refresh();
+ if(changes&PipelineState::SHPROG)
+ {
+ push_const_compat = hash<32>(self.shprog->stage_flags);
+ push_const_compat = hash_update<32>(push_const_compat, self.shprog->get_push_constants_size());
+ }
+
constexpr unsigned pipeline_mask = PipelineState::SHPROG|PipelineState::VERTEX_SETUP|PipelineState::FACE_CULL|
PipelineState::DEPTH_TEST|PipelineState::STENCIL_TEST|PipelineState::BLEND|PipelineState::PRIMITIVE_TYPE;
if(changes&pipeline_mask)
if(changed_sets)
{
- descriptor_set_handles.resize(self.shprog->get_n_descriptor_sets());
- for(unsigned i=0; i<descriptor_set_handles.size(); ++i)
+ descriptor_set_slots.resize(self.shprog->get_n_descriptor_sets());
+ first_changed_desc_set = descriptor_set_slots.size();
+ for(unsigned i=0; i<descriptor_set_slots.size(); ++i)
if(changed_sets&(1<<i))
- descriptor_set_handles[i] = device.get_pipeline_cache().get_descriptor_set(self, i);
+ {
+ descriptor_set_slots[i] = device.get_descriptor_pool().get_descriptor_set_slot(self, i);
+ first_changed_desc_set = min(first_changed_desc_set, i);
+ }
+
unapplied |= PipelineState::UNIFORMS;
}
}
const PipelineState &self = *static_cast<const PipelineState *>(this);
uint64_t result = hash<64>(0, 0);
- for(const PipelineState::BoundUniformBlock &b: self.uniform_blocks)
- if(b.used && b.binding>=0 && static_cast<unsigned>(b.binding>>20)==index)
+ bool empty = true;
+
+ auto i = lower_bound_member(self.uniform_blocks, static_cast<int>(index)<<20, &PipelineState::BoundUniformBlock::binding);
+ for(; (i!=self.uniform_blocks.end() && static_cast<unsigned>(i->binding)>>20==index); ++i)
+ if(i->used)
{
- result = hash_update<64>(result, b.binding);
- result = hash_update<64>(result, reinterpret_cast<uintptr_t>(b.block));
- result = hash_update<64>(result, reinterpret_cast<uintptr_t>(b.buffer->handle));
+ result = hash_update<64>(result, i->binding);
+ result = hash_update<64>(result, reinterpret_cast<uintptr_t>(i->block));
+ result = hash_update<64>(result, reinterpret_cast<uintptr_t>(i->buffer->handle));
+ empty = false;
}
- for(const PipelineState::BoundTexture &t: self.textures)
- if(t.used && (t.binding>>20)==index)
+
+ auto j = lower_bound_member(self.textures, index<<20, &PipelineState::BoundTexture::binding);
+ for(; (j!=self.textures.end() && j->binding>>20==index); ++j)
+ if(j->used)
{
- result = hash_update<64>(result, t.binding);
- result = hash_update<64>(result, reinterpret_cast<uintptr_t>(t.texture->handle));
- result = hash_update<64>(result, reinterpret_cast<uintptr_t>(t.sampler->handle));
- result = hash_update<64>(result, t.level);
+ result = hash_update<64>(result, j->binding);
+ result = hash_update<64>(result, reinterpret_cast<uintptr_t>(j->texture->handle));
+ result = hash_update<64>(result, reinterpret_cast<uintptr_t>(j->sampler->handle));
+ result = hash_update<64>(result, j->level);
+ empty = false;
}
+ if(!empty)
+ result = hash_update<64>(result, self.shprog->stage_flags);
+
return result;
}
+// Reports whether descriptor set `index` needs per-frame (dynamic) handling:
+// true when at least one used uniform block in the set is backed by a buffer
+// whose usage is STREAMING.
+bool VulkanPipelineState::is_descriptor_set_dynamic(unsigned index) const
+{
+ const PipelineState &self = *static_cast<const PipelineState *>(this);
+
+ // Bindings encode the set index in the bits above 20 (binding>>20), so the
+ // entries of one set form a contiguous range in the sorted uniform_blocks.
+ auto i = lower_bound_member(self.uniform_blocks, static_cast<int>(index)<<20, &PipelineState::BoundUniformBlock::binding);
+ for(; (i!=self.uniform_blocks.end() && static_cast<unsigned>(i->binding)>>20==index); ++i)
+ if(i->used && i->buffer && i->buffer->get_usage()==STREAMING)
+ return true;
+
+ return false;
+}
+
// Returns the VkDescriptorSetLayout for set `index`, read from the handles
// stored on the bound shader program.  NOTE(review): no bounds check —
// presumably callers guarantee index < number of descriptor sets; verify.
VkDescriptorSetLayout VulkanPipelineState::get_descriptor_set_layout(unsigned index) const
{
return static_cast<const PipelineState *>(this)->shprog->desc_set_layout_handles[index];
}
-unsigned VulkanPipelineState::fill_descriptor_writes(unsigned index, vector<char> &buffer) const
+unsigned VulkanPipelineState::fill_descriptor_writes(unsigned index, unsigned frame, vector<char> &buffer) const
{
const PipelineState &self = *static_cast<const PipelineState *>(this);
+ auto u_begin = lower_bound_member(self.uniform_blocks, static_cast<int>(index)<<20, &PipelineState::BoundUniformBlock::binding);
+ auto t_begin = lower_bound_member(self.textures, index<<20, &PipelineState::BoundTexture::binding);
+
unsigned n_buffers = 0;
- for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
- if(u.used && u.binding>=0 && static_cast<unsigned>(u.binding>>20)==index)
+ for(auto i=u_begin; (i!=self.uniform_blocks.end() && static_cast<unsigned>(i->binding)>>20==index); ++i)
+ if(i->used)
++n_buffers;
unsigned n_images = 0;
- for(const PipelineState::BoundTexture &t: self.textures)
- if(t.used && (t.binding>>20)==index)
+ for(auto i=t_begin; (i!=self.textures.end() && i->binding>>20==index); ++i)
+ if(i->used)
++n_images;
unsigned n_writes = n_buffers+n_images;
VkDescriptorBufferInfo *buffer_ptr = buffers;
VkDescriptorImageInfo *image_ptr = images;
- for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
- if(u.used && u.binding>=0 && static_cast<unsigned>(u.binding>>20)==index)
+ for(auto i=u_begin; (i!=self.uniform_blocks.end() && static_cast<unsigned>(i->binding)>>20==index); ++i)
+ if(i->used)
{
- buffer_ptr->buffer = handle_cast<::VkBuffer>(u.buffer->handle);
- buffer_ptr->offset = u.block->get_offset();
- buffer_ptr->range = u.block->get_data_size();
+ buffer_ptr->buffer = handle_cast<::VkBuffer>(i->buffer->handle);
+ buffer_ptr->offset = i->block->get_offset();
+ if(i->buffer->get_usage()==STREAMING)
+ buffer_ptr->offset += frame*i->buffer->get_size();
+ buffer_ptr->range = i->block->get_data_size();
write_ptr->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- write_ptr->dstBinding = u.binding&0xFFFFF;
+ write_ptr->dstBinding = i->binding&0xFFFFF;
write_ptr->descriptorCount = 1;
- write_ptr->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+ write_ptr->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
write_ptr->pBufferInfo = buffer_ptr;
++buffer_ptr;
++write_ptr;
}
- for(const PipelineState::BoundTexture &t: self.textures)
- if(t.used && (t.binding>>20)==index)
+ for(auto i=t_begin; (i!=self.textures.end() && i->binding>>20==index); ++i)
+ if(i->used)
{
- image_ptr->sampler = handle_cast<::VkSampler>(t.sampler->handle);
- if(t.level<0)
- image_ptr->imageView = handle_cast<::VkImageView>(t.texture->view_handle);
+ image_ptr->sampler = handle_cast<::VkSampler>(i->sampler->handle);
+ if(i->level<0)
+ image_ptr->imageView = handle_cast<::VkImageView>(i->texture->view_handle);
else
- image_ptr->imageView = handle_cast<::VkImageView>(t.texture->mip_view_handles[t.level]);
+ image_ptr->imageView = handle_cast<::VkImageView>(i->texture->mip_view_handles[i->level]);
image_ptr->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
write_ptr->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- write_ptr->dstBinding = t.binding&0xFFFFF;
+ write_ptr->dstBinding = i->binding&0xFFFFF;
write_ptr->descriptorCount = 1;
write_ptr->descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
write_ptr->pImageInfo = image_ptr;
const VulkanFunctions &vk = device.get_functions();
if(!last)
+ {
unapplied = ~0U;
+ first_changed_desc_set = 0;
+ }
else if(last!=this)
{
const PipelineState &last_ps = *static_cast<const PipelineState *>(last);
if(handle!=last->handle)
+ {
unapplied |= PipelineState::SHPROG;
+ if(self.push_const_compat!=last_ps.push_const_compat)
+ {
+ unapplied |= PipelineState::UNIFORMS;
+ first_changed_desc_set = 0;
+ }
+ }
if(self.vertex_setup!=last_ps.vertex_setup)
unapplied |= PipelineState::VERTEX_SETUP;
- for(unsigned i=0; (i<descriptor_set_handles.size() && i<last->descriptor_set_handles.size()); ++i)
- if(descriptor_set_handles[i]!=last->descriptor_set_handles[i])
+ for(unsigned i=0; i<descriptor_set_slots.size(); ++i)
+ if(i>=last->descriptor_set_slots.size() || descriptor_set_slots[i]!=last->descriptor_set_slots[i])
{
unapplied |= PipelineState::UNIFORMS;
+ first_changed_desc_set = min(first_changed_desc_set, i);
break;
}
if(self.viewport!=last_ps.viewport)
}
}
- if((unapplied&PipelineState::UNIFORMS) && !descriptor_set_handles.empty())
+ if((unapplied&PipelineState::UNIFORMS) && !descriptor_set_slots.empty())
{
- vector<uint32_t> dynamic_offsets;
- dynamic_offsets.reserve(self.uniform_blocks.size());
- for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
- if(u.used && u.binding>=0)
- {
- if(u.buffer->get_usage()==STREAMING)
- dynamic_offsets.push_back(frame*u.buffer->get_size());
- else
- dynamic_offsets.push_back(0);
- }
+ vector<VkDescriptorSet> descriptor_set_handles;
+ descriptor_set_handles.reserve(descriptor_set_slots.size()-first_changed_desc_set);
+ for(unsigned i=first_changed_desc_set; i<descriptor_set_slots.size(); ++i)
+ descriptor_set_handles.push_back(device.get_descriptor_pool().get_descriptor_set(
+ self.descriptor_set_slots[i], self, i, frame));
vk.CmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.shprog->layout_handle,
- 0, descriptor_set_handles.size(), descriptor_set_handles.data(), dynamic_offsets.size(), dynamic_offsets.data());
+ first_changed_desc_set, descriptor_set_handles.size(), descriptor_set_handles.data(), 0, 0);
}
if(unapplied&(PipelineState::VIEWPORT|PipelineState::SCISSOR))
}
unapplied = 0;
+ first_changed_desc_set = descriptor_set_slots.size();
}
} // namespace GL