void OpenGLBuffer::allocate()
{
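+ // get_total_size() covers every copy of a multi-buffered buffer; on
+ // OpenGL there is only ever one copy, so it equals the plain size.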
- size_t size = static_cast<const Buffer *>(this)->size;
+ size_t size = static_cast<const Buffer *>(this)->get_total_size();
if(ARB_buffer_storage)
{
void allocate();
void sub_data(size_t, size_t, const void *);
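+ // OpenGL does not multi-buffer, so there is always exactly one copy.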
+ unsigned get_multiplicity() const { return 1; }
+
bool can_map() const { return true; }
void *map();
bool unmap();
VkBufferCreateInfo buffer_info = { };
buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
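+ // Streaming buffers store one copy of the data per frame in flight,
+ // so the allocation must cover the total size of all copies.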
- buffer_info.size = self.size;
+ buffer_info.size = self.get_total_size();
buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT|VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT|
VK_BUFFER_USAGE_INDEX_BUFFER_BIT|VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
tq.finalize_transfer(staging);
}
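+// A streaming buffer keeps one copy per in-flight frame, so the CPU can
+// write the next frame's data while the GPU still reads an older copy.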
+unsigned VulkanBuffer::get_multiplicity() const
+{
+ BufferUsage usage = static_cast<const Buffer *>(this)->usage;
+ return (usage==STREAMING ? device.get_n_frames_in_flight() : 1);
+}
+
bool VulkanBuffer::can_map() const
{
return static_cast<const Buffer *>(this)->usage==STREAMING;
void allocate();
void sub_data(std::size_t, std::size_t, const void *);
+ unsigned get_multiplicity() const;
+
bool can_map() const;
void *map();
bool unmap();
void VulkanCommands::begin_buffer(VkRenderPass render_pass)
{
- if(!current_pool)
+ if(frame_index>=command_pools.size())
throw invalid_operation("VulkanCommands::begin_buffer");
const VulkanFunctions &vk = device.get_functions();
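+ // Look the pool up by index on each use; command_pools can grow and
+ // reallocate, which would invalidate a cached pointer.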
+ CommandPool *current_pool = &command_pools[frame_index];
if(!current_pool->in_use)
{
current_pool->fence.reset();
{
const VulkanFunctions &vk = device.get_functions();
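+ // Command pools are used round-robin, one per frame in flight.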
- unsigned pool_index = index%device.get_n_frames_in_flight();
- if(pool_index>=command_pools.size())
+ frame_index = index%device.get_n_frames_in_flight();
+ if(frame_index>=command_pools.size())
{
- command_pools.reserve(pool_index+1);
- for(unsigned i=command_pools.size(); i<pool_index+1; ++i)
+ command_pools.reserve(frame_index+1);
+ for(unsigned i=command_pools.size(); i<frame_index+1; ++i)
command_pools.emplace_back(device);
}
- current_pool = &command_pools[pool_index];
+ CommandPool *current_pool = &command_pools[frame_index];
if(current_pool->in_use)
{
current_pool->fence.wait();
submit_info.signalSemaphoreCount = (signal_sem ? 1 : 0);
submit_info.pSignalSemaphores = &vk_signal_sem;
- vk.QueueSubmit(1, &submit_info, current_pool->fence.handle);
+ vk.QueueSubmit(1, &submit_info, command_pools[frame_index].fence.handle);
primary_buffer = 0;
}
begin_render_pass(false, 0);
pipeline_state->refresh();
- pipeline_state->apply(pass_buffer, fb_is_swapchain);
+ pipeline_state->apply(pass_buffer, frame_index, fb_is_swapchain);
unsigned first_index = batch.get_offset()/batch.get_index_size();
vk.CmdDrawIndexed(pass_buffer, batch.size(), count, first_index, 0, 0);
}
Device &device;
std::vector<CommandPool> command_pools;
- CommandPool *current_pool = 0;
+ unsigned frame_index = 0;
VkCommandBuffer primary_buffer = 0;
VkCommandBuffer pass_buffer = 0;
const PipelineState *pipeline_state = 0;
write_ptr->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_ptr->dstBinding = u.binding&0xFFFFF;
write_ptr->descriptorCount = 1;
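+ // The dynamic variant lets a per-frame byte offset be supplied when the
+ // descriptor set is bound, selecting the right copy of a streaming buffer.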
- write_ptr->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ write_ptr->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
write_ptr->pBufferInfo = buffer_ptr;
++buffer_ptr;
return n_writes;
}
-void VulkanPipelineState::apply(VkCommandBuffer command_buffer, bool negative_viewport) const
+void VulkanPipelineState::apply(VkCommandBuffer command_buffer, unsigned frame, bool negative_viewport) const
{
const PipelineState &self = *static_cast<const PipelineState *>(this);
const VulkanFunctions &vk = device.get_functions();
}
if(!descriptor_set_handles.empty())
- vk.CmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.shprog->layout_handle, 0, descriptor_set_handles.size(), descriptor_set_handles.data(), 0, 0);
+ {
+ vector<uint32_t> dynamic_offsets;
+ dynamic_offsets.reserve(self.uniform_blocks.size());
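+ // All uniform blocks use dynamic descriptors, so each bound block needs
+ // an offset; only streaming buffers advance by one copy per frame.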
+ for(const PipelineState::BoundUniformBlock &u: self.uniform_blocks)
+ if(u.used && u.binding>=0)
+ {
+ if(u.buffer->get_usage()==STREAMING)
+ dynamic_offsets.push_back(frame*u.buffer->get_size());
+ else
+ dynamic_offsets.push_back(0);
+ }
+
+ vk.CmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.shprog->layout_handle,
+ 0, descriptor_set_handles.size(), descriptor_set_handles.data(), dynamic_offsets.size(), dynamic_offsets.data());
+ }
VkViewport viewport = { };
if(self.viewport)
VkDescriptorSetLayout get_descriptor_set_layout(unsigned) const;
unsigned fill_descriptor_writes(unsigned, std::vector<char> &) const;
- void apply(VkCommandBuffer, bool) const;
+ void apply(VkCommandBuffer, unsigned, bool) const;
};
using PipelineStateBackend = VulkanPipelineState;
bindings.emplace_back();
VkDescriptorSetLayoutBinding &binding = bindings.back();
binding.binding = b.bind_point;
- binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
binding.descriptorCount = 1;
binding.stageFlags = VK_SHADER_STAGE_ALL;
binding.pImmutableSamplers = 0;
{
if(size==0)
throw invalid_operation("Buffer::sub_data");
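+ // off may point into any copy of a multi-buffered buffer; off%size is the
+ // offset within a single copy, and the write must not cross a copy boundary.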
- if(off>size || off+sz>size)
+ if(off>get_total_size() || off%size+sz>size)
throw out_of_range("Buffer::sub_data");
BufferBackend::sub_data(off, sz, d);
void sub_data(std::size_t, std::size_t, const void *);
std::size_t get_size() const { return size; }
+ using BufferBackend::get_multiplicity;
+ std::size_t get_total_size() const { return size*get_multiplicity(); }
BufferUsage get_usage() const { return usage; }
void require_size(std::size_t) const;
void Bufferable::mark_dirty()
{
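+ // One dirty bit per copy of the data; 0xFF marks them all dirty.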
- dirty = true;
+ dirty = 0xFF;
}
-void Bufferable::upload_data(char *target) const
+void Bufferable::upload_data(unsigned frame, char *target) const
{
if(!buffer)
throw invalid_operation("Bufferable::upload_data");
+ unsigned multi_buf = buffer->get_multiplicity();
+ frame %= multi_buf;
+ uint8_t mask = 1<<frame;
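+ // Nothing to do if this frame's copy is already up to date.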
+ if(!(dirty&mask))
+ return;
+
size_t data_size = get_data_size();
if(location_dirty)
{
copy(source, source+data_size, target);
}
else
- buffer->sub_data(offset, data_size, get_data_pointer());
- dirty = false;
+ buffer->sub_data(frame*buffer->get_size()+offset, data_size, get_data_pointer());
+ dirty &= ~mask;
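+ // mark_dirty() sets all eight bits; once every copy actually in use is
+ // clean, clear the leftover high bits so dirty tests back to zero.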
+ if(!(dirty&((1<<multi_buf)-1)))
+ dirty = 0;
+}
+
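+// The async updater writes through mapped memory and always updates copy 0,
+// so it can only be used with single-copy buffers.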
+Bufferable::AsyncUpdater *Bufferable::create_async_updater() const
+{
+ if(!buffer || buffer->get_multiplicity()>1)
+ throw invalid_operation("Bufferable::create_async_updater");
+ return new AsyncUpdater(*this);
}
void Bufferable::AsyncUpdater::upload_data()
{
- bufferable.upload_data(mapped_address+bufferable.offset);
+ bufferable.upload_data(0, mapped_address+bufferable.offset);
// Update all bufferables in the same buffer at once
for(const Bufferable *b=bufferable.prev_in_buffer; b; b=b->prev_in_buffer)
if(b->dirty)
- b->upload_data(mapped_address+b->offset);
+ b->upload_data(0, mapped_address+b->offset);
for(const Bufferable *b=bufferable.next_in_buffer; b; b=b->next_in_buffer)
if(b->dirty)
- b->upload_data(mapped_address+b->offset);
+ b->upload_data(0, mapped_address+b->offset);
}
} // namespace GL
std::size_t get_required_buffer_size(bool = false) const;
/** Uploads new data into the buffer if necessary. */
- void refresh() const { if(dirty) upload_data(0); }
+ void refresh(unsigned f) const { if(dirty) upload_data(f, 0); }
/** Returns an object which can be used to upload data to the buffer using
mapped memory. If data is not dirty, returns null. */
return prog_begin;
}
-void ProgramData::apply(const Program &prog, PipelineState &state) const
+void ProgramData::apply(const Program &prog, PipelineState &state, unsigned frame) const
{
auto prog_begin = prepare_program(prog);
ReflectData::LayoutHash prog_hash = prog_begin->prog_hash;
+
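+ // Each block is refreshed for the current frame, so streaming uniform
+ // buffers update the copy that is about to be drawn from.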
for(auto i=prog_begin+1; (i!=programs.end() && i->prog_hash==prog_hash); ++i)
if(i->block)
{
state.set_uniform_block(i->bind_point, i->block);
if(i->bind_point>=0)
- i->block->refresh();
+ i->block->refresh(frame);
}
}
public:
/** Creates or updates UniformBlocks for a specific program if necessary,
then sets them to the PipelineState. */
- void apply(const Program &, PipelineState &) const;
+ void apply(const Program &, PipelineState &, unsigned) const;
void set_debug_name(const std::string &);
};
void Renderer::draw(const Batch &batch)
{
apply_state();
- batch.refresh();
+ batch.refresh(frame_index);
pipeline_state.set_primitive_type(batch.get_type());
commands.use_pipeline(&pipeline_state);
commands.draw(batch);
void Renderer::draw_instanced(const Batch &batch, unsigned count)
{
apply_state();
- batch.refresh();
+ batch.refresh(frame_index);
pipeline_state.set_primitive_type(batch.get_type());
commands.use_pipeline(&pipeline_state);
commands.draw_instanced(batch, count);
shdata_stack.erase(shdata_stack.begin()+state.shdata_count, shdata_stack.end());
for(const BoundProgramData &d: shdata_stack)
{
- d.shdata->apply(*state.shprog, pipeline_state);
+ d.shdata->apply(*state.shprog, pipeline_state, frame_index);
d.generation = d.shdata->get_generation();
}
changed &= ~SHADER_DATA;
if(state.vertex_setup)
{
if(const VertexArray *array = state.vertex_setup->get_vertex_array())
- array->refresh();
+ array->refresh(frame_index);
if(const VertexArray *array = state.vertex_setup->get_instance_array())
- array->refresh();
+ array->refresh(frame_index);
}
pipeline_state.set_vertex_setup(state.vertex_setup);