+ // (tail of create_region) Finish the allocation request with the chosen
+ // memory type, then allocate the device memory for the new region.
+ alloc_info.memoryTypeIndex = pool_index;
+
+ Region region;
+ vk.AllocateMemory(alloc_info, region.memory);
+
+ // Record which pool the region belongs to and whether it was a direct
+ // (dedicated, non-suballocated) allocation, plus its total size.
+ region.pool = pool_index;
+ region.direct = direct;
+ region.size = size;
+ regions.push_back(region);
+
+ // The region's index in the regions vector serves as its handle.
+ return regions.size()-1;
+}
+
+/* Returns an iterator to the first entry of indices whose block is at least
+size bytes.  indices must be sorted by ascending block size (allocate keeps
+the free-block lists in that order). */
+vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size)
+{
+ // Take the threshold as size_t in the comparator; the previous unsigned
+ // parameter silently narrowed the caller's size_t argument.
+ return lower_bound(indices, size, [this](unsigned j, size_t s){ return blocks[j].size<s; });
+}
+
+// Allocates a block of device memory of at least size bytes, aligned to
+// align, from a memory type compatible with type_bits and type.  Returns
+// the index of the resulting block in the blocks vector.
+unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType type)
+{
+ unsigned pool_index = find_memory_pool(type_bits, type);
+ Pool &pool = pools[pool_index];
+
+ // Large allocations get a dedicated region of their own instead of being
+ // suballocated from a shared region.
+ if(size>=direct_alloc_threshold)
+ {
+ Block block;
+ block.region = create_region(pool_index, size, true);
+ block.size = size;
+ block.allocated = true;
+
+ blocks.push_back(block);
+ return blocks.size()-1;
+ }
+
+ // free_blocks is kept sorted by ascending size (see the lower_bound
+ // insertions below), so back() is the largest free block.  If even that
+ // can't fit the request (with worst-case alignment padding), try merging
+ // adjacent free blocks first.  NOTE(review): consolidate() is defined
+ // elsewhere — presumed to merge neighboring free blocks; confirm.
+ if(pool.can_consolidate && blocks[pool.free_blocks.back()].size<size+align)
+ consolidate(pool_index);
+
+ // Best-fit search: start at the smallest free block that could possibly
+ // fit and advance until one fits including its alignment padding.
+ auto i = lower_bound_by_size(pool.free_blocks, size);
+ for(; i!=pool.free_blocks.end(); ++i)
+ {
+ Block &block = blocks[*i];
+ // Padding needed to round block.offset up to a multiple of align;
+ // equals (align-block.offset%align)%align.
+ size_t offset = align-1-(block.offset+align-1)%align;
+ if(offset+size<=block.size)
+ break;
+ }
+
+ unsigned block_index;
+ if(i!=pool.free_blocks.end())
+ {
+ // Found a usable free block; claim it.
+ block_index = *i;
+ pool.free_blocks.erase(i);
+ if(pool.free_blocks.empty())
+ pool.can_consolidate = false;
+ }
+ else
+ {
+ // No free block fits; create a fresh shared region and use a block
+ // spanning all of it.  NOTE(review): block.offset and block.allocated
+ // are left at the Block defaults here — presumably 0/false; confirm
+ // against the Block declaration.
+ Block block;
+ block.region = create_region(pool_index, default_region_size, false);
+ block.size = default_region_size;
+
+ blocks.push_back(block);
+ block_index = blocks.size()-1;
+ }
+
+ // If the chosen block isn't aligned, split off the misaligned head and
+ // return it to the free list (kept sorted by size).
+ size_t offset = align-1-(blocks[block_index].offset+align-1)%align;
+ if(offset)
+ {
+ unsigned head_index = block_index;
+ block_index = split_block(block_index, offset);
+ pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[head_index].size), head_index);
+ }
+
+ // Round the requested size up to a multiple of min_alignment so the
+ // remainder after this block stays alignable.
+ size += min_alignment-1;
+ size -= size%min_alignment;
+ // If enough space is left over, split off the tail as a new free block.
+ if(blocks[block_index].size>=size+min_alignment)
+ {
+ unsigned tail_index = split_block(block_index, size);
+ pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[tail_index].size), tail_index);
+ }