#include <stdexcept>
#include <msp/gl/extensions/arb_direct_state_access.h>
#include <msp/gl/extensions/arb_map_buffer_range.h>
+#include <msp/strings/format.h>
#include "buffer.h"
#include "error.h"
#include "misc.h"
}
}
+void Buffer::require_size(unsigned req_sz) const
+{
+ if(size<req_sz)
+ throw buffer_too_small(format("buffer has %d bytes; %d required", size, req_sz));
+}
+
BufferRange *Buffer::create_range(unsigned s, unsigned o)
{
return new BufferRange(*this, s, o);
#ifndef MSP_GL_BUFFER_H_
#define MSP_GL_BUFFER_H_
+#include <stdexcept>
+#include <string>
#include <vector>
#include "gl.h"
#include <msp/gl/extensions/arb_pixel_buffer_object.h>
namespace Msp {
namespace GL {
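+/** Thrown when a buffer's size is insufficient for an operation. */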
+class buffer_too_small: public std::logic_error
+{
+public:
+ buffer_too_small(const std::string &w): std::logic_error(w) { }
+ virtual ~buffer_too_small() throw() { }
+};
+
enum BufferType
{
ARRAY_BUFFER = GL_ARRAY_BUFFER,
unsigned get_size() const { return size; }
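+
+ /** Checks that the buffer has at least the required size and throws
+ buffer_too_small if it doesn't. */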
+ void require_size(unsigned) const;
+
BufferRange *create_range(unsigned, unsigned);
void *map(BufferAccess);
update_offset();
}
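+
+/* Resizing a buffer invalidates its contents, so every object in the chain
+must be marked dirty and uploaded again. */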
+void Bufferable::buffer_resized()
+{
+ for(Bufferable *b=this; b; b=b->next_in_buffer)
+ {
+ b->location_dirty = true;
+ b->dirty = true;
+ }
+ for(Bufferable *b=prev_in_buffer; b; b=b->prev_in_buffer)
+ {
+ b->location_dirty = true;
+ b->dirty = true;
+ }
+}
+
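+/* The last object in the chain has the highest offset, so the total size can
+be computed from its offset and data size. */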
+unsigned Bufferable::get_required_buffer_size() const
+{
+ const Bufferable *last = this;
+ for(; last->next_in_buffer; last=last->next_in_buffer) ;
+ return last->offset+last->get_data_size();
+}
+
Bufferable::AsyncUpdater *Bufferable::refresh_async() const
{
return dirty ? new AsyncUpdater(*this) : 0;
}
}
-bool Bufferable::resize_buffer() const
-{
- if(offset+get_data_size()>=buffer->get_size())
- {
- const Bufferable *last = this;
- for(; last->next_in_buffer; last=last->next_in_buffer) ;
-
- unsigned total_size = last->offset+last->get_data_size();
-
- if(total_size>buffer->get_size())
- {
- buffer->data(total_size, 0);
- return true;
- }
- }
-
- return false;
-}
-
-void Bufferable::update_buffer_size() const
-{
- if(resize_buffer())
- {
- Conditional<BindRestore> _bind(!ARB_direct_state_access, buffer, buffer->get_type());
-
- /* Resizing the buffer invalidates its contents. Non-dirty data may
- be in use, so reupload it. */
- for(const Bufferable *b=prev_in_buffer; b; b=b->prev_in_buffer)
- if(!b->dirty)
- b->upload_data(0);
- for(const Bufferable *b=next_in_buffer; b; b=b->next_in_buffer)
- if(!b->dirty)
- b->upload_data(0);
- }
-}
-
void Bufferable::upload_data(char *target) const
{
unsigned data_size = get_data_size();
if(location_dirty)
{
- update_buffer_size();
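+ /* Uploading no longer resizes the buffer; just verify that the data fits.
+ Whoever manages the buffer is responsible for making it large enough. */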
+ buffer->require_size(offset+data_size);
location_changed(buffer, offset, data_size);
location_dirty = false;
}
Bufferable::AsyncUpdater::AsyncUpdater(const Bufferable &b):
bufferable(b)
{
- buffer_resized = bufferable.resize_buffer();
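+ // The entire chain must fit in the buffer before it's mapped for writing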
+ bufferable.buffer->require_size(bufferable.get_required_buffer_size());
mapped_address = reinterpret_cast<char *>(bufferable.buffer->map(WRITE_ONLY));
}
bufferable.upload_data(mapped_address+bufferable.offset);
// Update all bufferables in the same buffer at once
for(const Bufferable *b=bufferable.prev_in_buffer; b; b=b->prev_in_buffer)
- if(b->dirty || buffer_resized)
+ if(b->dirty)
b->upload_data(mapped_address+b->offset);
for(const Bufferable *b=bufferable.next_in_buffer; b; b=b->next_in_buffer)
- if(b->dirty || buffer_resized)
+ if(b->dirty)
b->upload_data(mapped_address+b->offset);
}
private:
const Bufferable &bufferable;
char *mapped_address;
- bool buffer_resized;
public:
AsyncUpdater(const Bufferable &);
buffer, and this object is inserted after it. */
void use_buffer(Buffer *buf, Bufferable *prev = 0);
+ /** Informs the objects in this chain that the buffer has been resized,
+ invalidating its contents, and their data must be reuploaded. */
+ void buffer_resized();
+
+ /** Returns the total amount of storage required by this object and others
+ in the same chain, including any alignment between objects. */
+ unsigned get_required_buffer_size() const;
+
/** Uploads new data into the buffer if necessary. */
void refresh() const { if(buffer && dirty) upload_data(0); }
virtual void location_changed(Buffer *, unsigned, unsigned) const { }
private:
- bool resize_buffer() const;
-
- void update_buffer_size() const;
-
/** Uploads data to the buffer. Receives pointer to mapped buffer memory as
parameter. If null, buffer interface should be used instead. */
void upload_data(char *) const;
if(instance_data)
{
if(instance_data->size()<instances.size())
+ {
instance_data->append();
+ unsigned req_size = instance_data->get_required_buffer_size();
+ // XXX Inefficient, but will be rewritten imminently
+ if(instance_buffer->get_size()<req_size)
+ instance_buffer->data(req_size, 0);
+ }
update_instance_matrix(instances.size()-1);
}
}
{
vbuf = 0;
ibuf = 0;
+ dirty = 0;
disallow_rendering = false;
winding = 0;
batches.clear();
}
-void Mesh::create_buffers()
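+/* Creates any buffers requested by the mask that don't exist yet, and flags
+them to be resized if they're too small for the current contents. The actual
+resize is deferred to resize_buffers. */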
+void Mesh::check_buffers(unsigned mask)
{
- if(vbuf && ibuf)
- return;
-
- if(!vbuf)
- vbuf = new Buffer(ARRAY_BUFFER);
- vertices.use_buffer(vbuf);
-
- if(!ibuf)
- ibuf = new Buffer(ELEMENT_ARRAY_BUFFER);
+ if(mask&VERTEX_BUFFER)
+ {
+ if(!vbuf)
+ {
+ vbuf = new Buffer(ARRAY_BUFFER);
+ vertices.use_buffer(vbuf);
+ vtx_setup.set_vertex_array(vertices);
+ }
+ unsigned req_size = vertices.get_required_buffer_size();
+ if(vbuf->get_size()<req_size)
+ {
+ dirty |= VERTEX_BUFFER;
+ vertices.buffer_resized();
+ }
+ }
- vtx_setup.set_vertex_array(vertices);
- vtx_setup.set_index_buffer(*ibuf);
+ if(mask&INDEX_BUFFER)
+ {
+ if(!ibuf)
+ {
+ ibuf = new Buffer(ELEMENT_ARRAY_BUFFER);
+ if(!batches.empty())
+ batches.front().use_buffer(ibuf);
+ vtx_setup.set_index_buffer(*ibuf);
+ }
+ unsigned req_size = (batches.empty() ? 0 : batches.front().get_required_buffer_size());
+ if(ibuf->get_size()<req_size)
+ {
+ dirty |= INDEX_BUFFER;
+ if(!batches.empty())
+ batches.front().buffer_resized();
+ }
+ }
}
unsigned Mesh::get_n_vertices() const
void Mesh::add_batch(const Batch &b)
{
- create_buffers();
-
if(batches.empty())
{
batches.push_back(b);
- batches.back().use_buffer(ibuf);
+ if(ibuf)
+ batches.back().use_buffer(ibuf);
}
else if(batches.back().can_append(b.get_type()))
batches.back().append(b);
else
batches.back().use_buffer(ibuf, prev);
}
+
+ check_buffers(INDEX_BUFFER);
}
void Mesh::set_winding(const WindingTest *w)
return;
}
+ if(dirty)
+ resize_buffers();
+
renderer.set_vertex_setup(vs ? vs : &vtx_setup);
renderer.set_winding_test(winding);
}
}
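+
+/* Resizes any buffers that were flagged as too small by check_buffers. */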
+void Mesh::resize_buffers() const
+{
+ if(dirty&VERTEX_BUFFER)
+ vbuf->data(vertices.get_required_buffer_size(), 0);
+ if(dirty&INDEX_BUFFER)
+ ibuf->data(batches.front().get_required_buffer_size(), 0);
+ dirty = 0;
+}
+
Resource::AsyncLoader *Mesh::load(IO::Seekable &io, const Resources *)
{
return new AsyncLoader(*this, io);
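+ // VertexFormat overloads the comma operator to append components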
for(vector<VertexComponent>::const_iterator i=c.begin(); i!=c.end(); ++i)
fmt = (fmt, *i);
obj.vertices.reset(fmt);
- if(obj.vbuf)
- // Set it again to force the vertex setup to update
- obj.vtx_setup.set_vertex_array(obj.vertices);
load_sub(obj.vertices);
+ obj.check_buffers(VERTEX_BUFFER);
}
void Mesh::Loader::batch(PrimitiveType p)
phase(0)
{
mesh.disallow_rendering = true;
- mesh.create_buffers();
+ mesh.check_buffers(VERTEX_BUFFER|INDEX_BUFFER);
}
Mesh::AsyncLoader::~AsyncLoader()
}
else if(phase==1)
{
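+ // The data has been loaded by now, so the buffers can be sized to fit it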
+ mesh.resize_buffers();
vertex_updater = mesh.vertices.refresh_async();
if(!mesh.batches.empty())
index_updater = mesh.batches.front().refresh_async();
virtual bool process();
};
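+
+ /** Bitmask values for check_buffers and the dirty flag. */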
+ enum BufferMask
+ {
+ VERTEX_BUFFER = 1,
+ INDEX_BUFFER = 2
+ };
+
VertexArray vertices;
std::vector<Batch> batches;
Buffer *vbuf;
Buffer *ibuf;
VertexSetup vtx_setup;
- bool defer_buffers;
- mutable bool dirty;
+ mutable unsigned short dirty;
bool disallow_rendering;
const WindingTest *winding;
void clear();
private:
- void create_buffers();
+ void check_buffers(unsigned);
public:
const VertexArray &get_vertices() const { return vertices; }
void draw_instanced(Renderer &, const VertexSetup &, unsigned) const;
private:
void draw(Renderer &, const VertexSetup *, unsigned) const;
+ void resize_buffers() const;
public:
virtual int get_load_priority() const { return 1; }
offset(mesh.get_vertices().size());
}
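+
+/* Adding a vertex can grow the vertex array, so the buffer size needs to be
+rechecked after each one. */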
+void MeshBuilder::vertex_(const Vector4 &v)
+{
+ PrimitiveBuilder::vertex_(v);
+ mesh.check_buffers(Mesh::VERTEX_BUFFER);
+}
+
void MeshBuilder::begin_()
{
batch = new Batch(type);
MeshBuilder(Mesh &);
void auto_offset();
private:
+ virtual void vertex_(const Vector4 &);
virtual void begin_();
virtual void end_();
virtual void element_(unsigned);
const Program::UniformBlockMap &prog_blocks = prog->get_uniform_blocks();
+ UniformBlock *old_last_block = last_block;
if(pu.dirty==ALL_ONES)
{
/* The set of uniforms has changed since this program was last used.
to avoid state thrashing. */
if(buffered_blocks_updated && !ARB_direct_state_access)
buffer->bind();
+
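+ /* A new block was appended to the end of the buffer, so it may need to be
+ enlarged to fit everything. */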
+ if(last_block!=old_last_block)
+ {
+ unsigned required_size = last_block->get_required_buffer_size();
+ if(required_size>buffer->get_size())
+ buffer->data(required_size, 0);
+ }
}
for(vector<ProgramBlock>::iterator i=pu.blocks.begin(); i!=pu.blocks.end(); ++i)