// Destructor.  Detaches the mesh from its resource manager first, so the
// manager can no longer reference the mesh, then releases the GL buffers
// the mesh owns.
Mesh::~Mesh()
{
	set_manager(0);
	delete vbuf;
	delete ibuf;
}
+
// Discards all vertex data and batches.  The GL buffers themselves are kept
// and can be refilled; use unload() to release them as well.
void Mesh::clear()
{
	vertices.clear();
	batches.clear();
}
+
/* Ensures that the buffers selected by mask are adequate for the current
contents, recreating any that have been outgrown.  Actual storage allocation
is deferred to resize_buffers(); this function only marks the buffer dirty. */
void Mesh::check_buffers(unsigned mask)
{
	if(mask&VERTEX_BUFFER)
	{
		unsigned req_size = vertices.get_required_buffer_size();
		/* Recreate when the buffer is missing, or when storage was already
		allocated (size>0) but is now too small.  A zero-sized buffer has no
		storage yet and needs no replacement. */
		if(!vbuf || (vbuf->get_size()>0 && vbuf->get_size()<req_size))
		{
			delete vbuf;
			vbuf = new Buffer(ARRAY_BUFFER);
			vertices.use_buffer(vbuf);
			vtx_setup.set_vertex_array(vertices);
			dirty |= VERTEX_BUFFER;
		}
	}

	if(mask&INDEX_BUFFER)
	{
		/* The batches share one index buffer; the front batch reports the
		required size for the whole chain. */
		unsigned req_size = (batches.empty() ? 0 : batches.front().get_required_buffer_size());
		if(!ibuf || (ibuf->get_size()>0 && ibuf->get_size()<req_size))
		{
			delete ibuf;
			ibuf = new Buffer(ELEMENT_ARRAY_BUFFER);
			if(!batches.empty())
				batches.front().change_buffer(ibuf);
			vtx_setup.set_index_buffer(*ibuf);
			dirty |= INDEX_BUFFER;
		}
	}
}
+
// Returns the number of vertices currently stored in the mesh.
unsigned Mesh::get_n_vertices() const
{
	return vertices.size();
}
+
// Returns a writable pointer to the data of vertex i.
// NOTE(review): writes through the pointer presumably require the vertex
// buffer to be refreshed before drawing — confirm against VertexArray.
float *Mesh::modify_vertex(unsigned i)
{
	return vertices.modify(i);
}
+
/* Adds a batch to the mesh.  If the new batch is compatible with the last
existing one, it is merged into it; otherwise it is stored separately.  The
batches chain onto a shared index buffer, so special care is needed when the
vector reallocates and every Batch object moves to a new address. */
void Mesh::add_batch(const Batch &b)
{
	if(batches.empty())
	{
		batches.push_back(b);
		if(ibuf)
			batches.back().use_buffer(ibuf);
	}
	else if(batches.back().can_append(b.get_type()))
		// Same primitive type: merge into the existing batch.
		batches.back().append(b);
	else
	{
		bool reallocate = (batches.size()==batches.capacity());
		if(reallocate)
		{
			/* push_back is about to reallocate the vector.  Detach every
			batch from the buffer first (back to front) so no batch holds a
			link to a soon-to-be-stale neighbor. */
			for(vector<Batch>::iterator i=batches.end(); i!=batches.begin(); )
				(--i)->use_buffer(0);
		}

		Batch *prev = &batches.back();
		batches.push_back(b);
		if(reallocate)
		{
			// Re-attach every batch at its new address, chaining each one
			// after its predecessor.
			prev = 0;
			for(vector<Batch>::iterator i=batches.begin(); i!=batches.end(); ++i)
			{
				i->use_buffer(ibuf, prev);
				prev = &*i;
			}
		}
		else
			// No reallocation: only the new batch needs attaching.
			batches.back().use_buffer(ibuf, prev);
	}

	// The index buffer may now be too small for the combined batches.
	check_buffers(INDEX_BUFFER);
}
+
// Sets the winding test to apply when the mesh is drawn.  May be null to
// disable the test.  The mesh does not take ownership of the object.
void Mesh::set_winding(const WindingTest *w)
{
	winding = w;
}
+
// Draws the mesh once, using the mesh's own vertex setup.
void Mesh::draw(Renderer &renderer) const
{
	draw(renderer, 0, 0);
}
+
/* Draws multiple instances of the mesh.  The caller supplies a vertex setup,
which typically adds per-instance attributes; it must still use this mesh's
vertex array or an invalid_argument is thrown. */
void Mesh::draw_instanced(Renderer &renderer, const VertexSetup &vs, unsigned count) const
{
	if(vs.get_vertex_array()!=&vertices)
		throw invalid_argument("Mesh::draw_instanced");

	draw(renderer, &vs, count);
}
+
+void Mesh::draw(Renderer &renderer, const VertexSetup *vs, unsigned count) const
+{
+ if(manager)
+ {
+ manager->resource_used(*this);
+ if(disallow_rendering)
+ return;
+ }
+
+ if(dirty)
+ resize_buffers();
+
+ renderer.set_vertex_setup(vs ? vs : &vtx_setup);
+ renderer.set_winding_test(winding);
+
+ if(!count)
+ {
+ for(vector<Batch>::const_iterator i=batches.begin(); i!=batches.end(); ++i)
+ renderer.draw(*i);
+ }
+ else
+ {
+ for(vector<Batch>::const_iterator i=batches.begin(); i!=batches.end(); ++i)
+ renderer.draw_instanced(*i, count);
+ }
+}
+
/* Allocates storage for any buffers that check_buffers() flagged as dirty.
Called lazily from draw(), which is const; dirty and the buffer pointers are
presumably declared mutable to permit this — confirm in the header. */
void Mesh::resize_buffers() const
{
	if(dirty&VERTEX_BUFFER)
		vbuf->storage(vertices.get_required_buffer_size());
	if(dirty&INDEX_BUFFER)
		ibuf->storage(batches.front().get_required_buffer_size());
	dirty = 0;
}
+
// Creates an asynchronous loader which fills the mesh from io.
// NOTE(review): the caller presumably takes ownership of the returned
// loader, per the Resource interface — confirm against the base class.
Resource::AsyncLoader *Mesh::load(IO::Seekable &io, const Resources *)
{
	return new AsyncLoader(*this, io);
}
+
+UInt64 Mesh::get_data_size() const
+{
+ UInt64 size = 0;
+ if(vbuf)
+ size += vbuf->get_size();
+ if(ibuf)
+ size += ibuf->get_size();
+ return size;
+}
+
+void Mesh::unload()
+{
+ vertices.clear();
+ vertices.use_buffer(0);
+ batches.clear();
+ delete vbuf;
+ delete ibuf;
+ vbuf = 0;
+ ibuf = 0;
+}
+
+
+Mesh::Loader::Loader(Mesh &m, bool g):
+ DataFile::ObjectLoader<Mesh>(m),
+ allow_gl_calls(g)