+vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size) const
+{
+ return lower_bound(indices, size, [this](unsigned j, unsigned s){ return blocks[j].size<s; });
+}
+
/** Allocates a block of memory from the pool matching type_bits and type.
Large requests get a dedicated region; smaller ones are suballocated from
shared regions, with free blocks tracked per pool in a size-sorted list.
Returns the index of the allocated block in `blocks`. */
unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType type)
{
	unsigned pool_index = find_memory_pool(type_bits, type);
	Pool &pool = pools[pool_index];

	// Large allocations bypass suballocation and get a region of their own.
	if(size>=direct_alloc_threshold)
	{
		Block block;
		block.region = create_region(pool_index, size, true);
		block.size = size;
		block.allocated = true;

		blocks.push_back(block);
		return blocks.size()-1;
	}

	/* free_blocks is sorted by size, so back() is the largest free block.  If
	even that can't hold the request with worst-case alignment padding, try
	merging adjacent free blocks first.
	NOTE(review): this indexes free_blocks.back() without an emptiness check —
	relies on can_consolidate never being set while the list is empty; confirm
	against the code that sets it (not visible in this chunk). */
	if(pool.can_consolidate && blocks[pool.free_blocks.back()].size<size+align)
		consolidate(pool_index);

	/* Scan candidates starting from the smallest block that could possibly
	fit, accounting for the padding needed to round each block's offset up to
	a multiple of `align`. */
	auto i = lower_bound_by_size(pool.free_blocks, size);
	for(; i!=pool.free_blocks.end(); ++i)
	{
		Block &block = blocks[*i];
		size_t offset = align-1-(block.offset+align-1)%align;
		if(offset+size<=block.size)
			break;
	}

	unsigned block_index;
	if(i!=pool.free_blocks.end())
	{
		// Found a suitable block; take it out of the free list.
		block_index = *i;
		pool.free_blocks.erase(i);
		if(pool.free_blocks.empty())
			pool.can_consolidate = false;
	}
	else
	{
		/* No suitable free block; create a fresh shared region and treat it
		as one big block (offset 0, so no alignment padding will be needed
		below). */
		Block block;
		block.region = create_region(pool_index, default_region_size, false);
		block.size = regions[block.region].size;

		blocks.push_back(block);
		block_index = blocks.size()-1;
	}

	/* Split off any alignment padding as a head block and return it to the
	free list; the allocation proper continues in the tail. */
	size_t offset = align-1-(blocks[block_index].offset+align-1)%align;
	if(offset)
	{
		unsigned head_index = block_index;
		block_index = split_block(block_index, offset);
		pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[head_index].size), head_index);
	}

	/* Round the size up to a multiple of min_alignment so the block following
	this one also starts at an aligned offset. */
	size += min_alignment-1;
	size -= size%min_alignment;
	// If a meaningful amount of space is left over, split it off as a free tail.
	if(blocks[block_index].size>=size+min_alignment)
	{
		unsigned tail_index = split_block(block_index, size);
		pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[tail_index].size), tail_index);
	}

	blocks[block_index].allocated = true;

	return block_index;
}
+
+unsigned MemoryAllocator::split_block(unsigned index, size_t head_size)
+{
+ blocks.emplace_back();
+ Block &block = blocks[index];
+ Block &tail = blocks.back();
+ unsigned tail_index = blocks.size()-1;
+
+ tail.region = block.region;
+ tail.offset = block.offset+head_size;
+ tail.size = block.size-head_size;
+ tail.prev = index;
+ tail.next = block.next;
+
+ block.size = head_size;
+ block.next = tail_index;
+
+ return tail_index;
+}
+
+void MemoryAllocator::consolidate(unsigned pool_index)
+{
+ Pool &pool = pools[pool_index];
+
+ vector<unsigned> merged_blocks;
+ unsigned i = 0;
+ for(unsigned j=0; j<pool.free_blocks.size(); ++j)
+ {
+ unsigned block_index = pool.free_blocks[j];
+ Block &block = blocks[block_index];
+ if(!block.allocated)
+ {
+ if(block.prev<0 || blocks[block.prev].allocated)
+ {
+ if(block.next>=0 && !blocks[block.next].allocated)
+ {
+ merge_block_with_next(block_index);
+
+ while(block.next>=0 && !blocks[block.next].allocated)
+ merge_block_with_next(block_index);
+
+ merged_blocks.insert(lower_bound_by_size(merged_blocks, block.size), block_index);
+ }
+ }
+ else
+ continue;
+ }
+
+ if(j!=i)
+ pool.free_blocks[i] = block_index;
+ ++i;
+ }
+
+ pool.free_blocks.resize(i+merged_blocks.size());
+
+ if(!merged_blocks.empty())
+ {
+ unsigned j = merged_blocks.size();
+ for(unsigned k=pool.free_blocks.size()-1; j; --k)
+ {
+ if(!i || blocks[merged_blocks[j-1]].size>blocks[pool.free_blocks[i-1]].size)
+ pool.free_blocks[k] = merged_blocks[--j];
+ else
+ pool.free_blocks[k] = pool.free_blocks[--i];
+ }
+ }
+}
+
+void MemoryAllocator::merge_block_with_next(unsigned index)
+{
+ Block &block = blocks[index];
+
+ Block &next = blocks[block.next];
+ block.size += next.size;
+ block.next = next.next;
+ if(block.next>=0)
+ blocks[block.next].prev = index;
+
+ next = Block();