+ alloc_info.memoryTypeIndex = pool_index;
+
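+ // Create the device memory object and record the region's bookkeeping.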
+ Region region;
+ vk.AllocateMemory(alloc_info, region.memory);
+
+ region.pool = pool_index;
+ region.direct = direct;
+ region.size = size;
+ regions.push_back(region);
+
+ return regions.size()-1;
+}
+
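+// Finds the first entry in a size-ordered index list whose block is at least
+// the requested size.  Uses a container-based lower_bound helper rather than
+// the std iterator-pair form.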
+vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size) const
+{
+ return lower_bound(indices, size, [this](unsigned j, size_t s){ return blocks[j].size<s; });
+}
+
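+// Computes where an allocation of the given size and alignment would start
+// within a block, returned relative to the block's own offset.  If the
+// neighboring block holds the other resource type, the offset is first
+// padded to Vulkan's bufferImageGranularity, which mandates extra spacing
+// between linear (buffer) and non-linear (image) resources in the same
+// memory.  Buffers are placed at the end of the block so the two resource
+// types grow from opposite ends.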
+size_t MemoryAllocator::get_alloc_offset(const Block &block, size_t size, size_t align, BlockType type) const
+{
+ size_t offset = block.offset;
+ if(type!=block.type && block.prev>=0 && type!=blocks[block.prev].type)
+ {
+ offset += buffer_image_granularity-1;
+ offset -= offset%buffer_image_granularity;
+ }
+
+ offset += align-1;
+ offset -= offset%align;
+
+ if(type==BUFFER)
+ {
+ size_t offset2 = block.offset+block.size-size;
+ offset2 -= offset2%align;
+ offset = max(offset, offset2);
+ }
+
+ return offset-block.offset;
+}
+
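+// Rescans the free list from the largest block downward until the cached
+// indices of the largest buffer-compatible and image-compatible free blocks
+// are valid again.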
+void MemoryAllocator::update_largest_free(Pool &pool)
+{
+ for(auto i=pool.free_blocks.end(); ((pool.largest_free_buffer<0 || pool.largest_free_image<0) && i!=pool.free_blocks.begin()); )
+ {
+ --i;
+ if(pool.largest_free_buffer<0 && (blocks[*i].type==BUFFER || blocks[*i].type==UNDECIDED))
+ pool.largest_free_buffer = *i;
+ if(pool.largest_free_image<0 && (blocks[*i].type==IMAGE || blocks[*i].type==UNDECIDED))
+ pool.largest_free_image = *i;
+ }
+}
+
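+// Allocates memory for a buffer or an image.  Large requests get a dedicated
+// region; smaller ones are carved out of shared regions, splitting free
+// blocks as necessary.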
+unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType mem_type, BlockType block_type)
+{
+ unsigned pool_index = find_memory_pool(type_bits, mem_type);
+ Pool &pool = pools[pool_index];
+
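+ // Requests at or above the threshold bypass the block allocator entirely
+ // and receive a region of their own.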
+ if(size>=direct_alloc_threshold)
+ {
+ Block block;
+ block.region = create_region(pool_index, size, true);
+ block.size = size;
+ block.allocated = true;
+ block.type = block_type;
+
+ blocks.push_back(block);
+ return blocks.size()-1;
+ }
+
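+ // Refresh the cached largest-free block indices if they have been
+ // invalidated, and consolidate adjacent free blocks when even the
+ // largest one is too small to satisfy the request.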
+ if(pool.largest_free_buffer<0 || pool.largest_free_image<0)
+ update_largest_free(pool);
+
+ int largest_free = (block_type==BUFFER ? pool.largest_free_buffer : pool.largest_free_image);
+ if(pool.can_consolidate && (largest_free<0 || blocks[largest_free].size<size+align))
+ consolidate(pool_index);
+
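+ // Search the size-ordered free list for the smallest block which can
+ // accommodate the request once alignment and granularity are accounted for.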
+ auto i = lower_bound_by_size(pool.free_blocks, size);
+ for(; i!=pool.free_blocks.end(); ++i)
+ {
+ Block &block = blocks[*i];
+ if(block.type==UNDECIDED || block.type==block_type)
+ {
+ size_t offset = get_alloc_offset(block, size, align, block_type);
+ if(offset+size<=block.size)
+ break;
+ }
+ }
+
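+ // If a suitable block was found, remove it from the free list and drop any
+ // largest-free cache entry that pointed at it.  Otherwise create a new
+ // region and use it as a single large block.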
+ unsigned block_index;
+ if(i!=pool.free_blocks.end())
+ {
+ block_index = *i;
+ pool.free_blocks.erase(i);
+ if(pool.free_blocks.empty())
+ pool.can_consolidate = false;
+ if(static_cast<int>(block_index)==pool.largest_free_buffer)
+ pool.largest_free_buffer = -1;
+ if(static_cast<int>(block_index)==pool.largest_free_image)
+ pool.largest_free_image = -1;
+ }
+ else
+ {
+ Block block;
+ block.region = create_region(pool_index, default_region_size, false);
+ block.size = regions[block.region].size;
+
+ blocks.push_back(block);
+ block_index = blocks.size()-1;
+ }
+
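+ // If alignment or granularity left a gap at the front of the block, split
+ // it off and return the head to the free list.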
+ size_t offset = get_alloc_offset(blocks[block_index], size, align, block_type);
+ if(offset)
+ {
+ unsigned head_index = block_index;
+ block_index = split_block(block_index, offset);
+ pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[head_index].size), head_index);
+ }
+
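+ // Round the allocation up to the minimum alignment and split off any
+ // leftover tail as a new free block.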
+ size += min_alignment-1;
+ size -= size%min_alignment;
+ if(blocks[block_index].size>=size+min_alignment)
+ {
+ unsigned tail_index = split_block(block_index, size);
+ pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[tail_index].size), tail_index);
+ }