X-Git-Url: http://git.tdb.fi/?a=blobdiff_plain;f=source%2Fbackends%2Fvulkan%2Fmemoryallocator.cpp;h=227b54b6595904041cdff1d77c81b6944e0fa3b2;hb=738e2879b3cc64a7200f64e7a838704db82550b4;hp=6ef5d2bd7a6b8714fb545997819abb11e1995fa9;hpb=a16145549dc87c3b12671f797bd77b14bcc7786b;p=libs%2Fgl.git

diff --git a/source/backends/vulkan/memoryallocator.cpp b/source/backends/vulkan/memoryallocator.cpp
index 6ef5d2bd..227b54b6 100644
--- a/source/backends/vulkan/memoryallocator.cpp
+++ b/source/backends/vulkan/memoryallocator.cpp
@@ -1,4 +1,5 @@
 #include <msp/core/algorithm.h>
+#include <msp/core/maputils.h>
 #include <msp/graphics/vulkancontext_platform.h>
 #include "device.h"
 #include "error.h"
@@ -19,62 +20,220 @@ MemoryAllocator::MemoryAllocator(Device &d):
 	VkPhysicalDeviceMemoryProperties mem_props;
 	vk.GetPhysicalDeviceMemoryProperties(mem_props);
 
+	for(unsigned i=0; i<mem_props.memoryTypeCount; ++i)
+	{
+		VkMemoryPropertyFlags flags = mem_props.memoryTypes[i].propertyFlags;
+		MemoryType type = UNKNOWN_MEMORY;
+		if(flags&VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+			type = (flags&VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT ? STREAMING_MEMORY : DEVICE_MEMORY);
+		else if(flags&VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+			type = STAGING_MEMORY;
+		pools.emplace_back(type);
+	}
+}
+
+unsigned MemoryAllocator::find_memory_pool(unsigned type_bits, MemoryType type) const
+{
+	for(unsigned i=0; i<pools.size(); ++i)
+		if((type_bits&(1<<i)) && pools[i].type==type)
+			return i;
+	throw runtime_error("Unable to find suitable memory type");
+}
+
+unsigned MemoryAllocator::create_region(unsigned pool_index, size_t size, bool direct)
+{
+	Region region;
+	region.pool = pool_index;
+	region.direct = direct;
+	region.size = size;
+
+	VkMemoryAllocateInfo alloc_info = { };
+	alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+	alloc_info.allocationSize = size;
+	alloc_info.memoryTypeIndex = pool_index;
+
+	const VulkanFunctions &vk = device.get_functions();
+	vk.AllocateMemory(alloc_info, region.memory);
+
+	regions.push_back(region);
+	return regions.size()-1;
+}
+
+vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size)
+{
+	return lower_bound(indices, size, [this](unsigned j, unsigned s){ return blocks[j].size<s; });
+}
+
+unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType type)
+{
+	unsigned pool_index = find_memory_pool(type_bits, type);
+	Pool &pool = pools[pool_index];
+
+	size = (size+align-1)&~(align-1);
+
+	if(size>=direct_alloc_threshold)
+	{
+		Block block;
+		block.region = create_region(pool_index, size, true);
+		block.size = size;
+		block.allocated = true;
+
+		blocks.push_back(block);
+		return blocks.size()-1;
+	}
+
+	if(pool.can_consolidate && blocks[pool.free_blocks.back()].size<size)
+		consolidate(pool_index);
+
+	unsigned block_index;
+	auto i = lower_bound_by_size(pool.free_blocks, size);
+	if(i!=pool.free_blocks.end())
+	{
+		block_index = *i;
+		pool.free_blocks.erase(i);
+	}
+	else
+	{
+		Block block;
+		block.region = create_region(pool_index, default_region_size, false);
+		block.size = regions[block.region].size;
+
+		blocks.push_back(block);
+		block_index = blocks.size()-1;
+	}
+
+	if(blocks[block_index].size>=size+min_alignment)
+	{
+		unsigned tail_index = split_block(block_index, size);
+		pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[tail_index].size), tail_index);
+	}
 
-	Allocation alloc;
-	vk.AllocateMemory(alloc_info, alloc.memory);
+	blocks[block_index].allocated = true;
 
-	alloc.type = type;
-	alloc.size = size;
-	allocations.push_back(alloc);
+	return block_index;
+}
+
+unsigned MemoryAllocator::split_block(unsigned index, size_t head_size)
+{
+	blocks.emplace_back();
+	Block &block = blocks[index];
+	Block &tail = blocks.back();
+	unsigned tail_index = blocks.size()-1;
+
+	tail.region = block.region;
+	tail.offset = block.offset+head_size;
+	tail.size = block.size-head_size;
+	tail.prev = index;
+	tail.next = block.next;
+
+	block.size = head_size;
+	block.next = tail_index;
 
-	return allocations.size();
+	return tail_index;
 }
 
-MemoryAllocator::Allocation &MemoryAllocator::get_allocation(unsigned id)
+void MemoryAllocator::consolidate(unsigned pool_index)
 {
-	return allocations[id-1];
+	Pool &pool = pools[pool_index];
+
+	vector<unsigned> merged_blocks;
+	unsigned i = 0;
+	for(unsigned j=0; j<pool.free_blocks.size(); ++j)
+	{
+		unsigned block_index = pool.free_blocks[j];
+		Block &block = blocks[block_index];
+		if(!block.allocated)
+		{
+			if(block.prev<0 || blocks[block.prev].allocated)
+			{
+				if(block.next>=0 && !blocks[block.next].allocated)
+				{
+					merge_block_with_next(block_index);
+
+					while(block.next>=0 && !blocks[block.next].allocated)
+						merge_block_with_next(block_index);
+
+					merged_blocks.insert(lower_bound_by_size(merged_blocks, block.size), block_index);
+				}
+			}
+			else
+				continue;
+		}
+
+		if(j!=i)
+			pool.free_blocks[i] = block_index;
+		++i;
+	}
+
+	pool.free_blocks.resize(i+merged_blocks.size());
+
+	if(!merged_blocks.empty())
+	{
+		unsigned j = merged_blocks.size();
+		for(unsigned k=pool.free_blocks.size()-1; j; --k)
+		{
+			if(!i || blocks[merged_blocks[j-1]].size>blocks[pool.free_blocks[i-1]].size)
+				pool.free_blocks[k] = merged_blocks[--j];
+			else
+				pool.free_blocks[k] = pool.free_blocks[--i];
+		}
+	}
 }
 
-const MemoryAllocator::Allocation &MemoryAllocator::get_allocation(unsigned id) const
+void MemoryAllocator::merge_block_with_next(unsigned index)
 {
-	return allocations[id-1];
+	Block &block = blocks[index];
+
+	Block &next = blocks[block.next];
+	block.size += next.size;
+	block.next = next.next;
+	if(block.next>=0)
+		blocks[block.next].prev = index;
+
+	next = Block();
 }
 
 unsigned MemoryAllocator::allocate(VkBuffer buffer, MemoryType type)
@@ -84,11 +243,12 @@ unsigned MemoryAllocator::allocate(VkBuffer buffer, MemoryType type)
 	VkMemoryRequirements requirements;
 	vk.GetBufferMemoryRequirements(buffer, requirements);
 
-	unsigned id = allocate(requirements.size, requirements.memoryTypeBits, type);
+	unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type);
 
-	vk.BindBufferMemory(buffer, get_allocation(id).memory, 0);
+	Block &block = blocks[block_index];
+	vk.BindBufferMemory(buffer, regions[block.region].memory, block.offset);
 
-	return id;
+	return block_index+1;
 }
 
 unsigned MemoryAllocator::allocate(VkImage image, MemoryType type)
@@ -98,52 +258,75 @@ unsigned MemoryAllocator::allocate(VkImage image, MemoryType type)
 	VkMemoryRequirements requirements;
 	vk.GetImageMemoryRequirements(image, requirements);
 
-	unsigned id = allocate(requirements.size, requirements.memoryTypeBits, type);
+	unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type);
 
-	vk.BindImageMemory(image, get_allocation(id).memory, 0);
+	Block &block = blocks[block_index];
+	vk.BindImageMemory(image, regions[block.region].memory, block.offset);
 
-	return id;
+	return block_index+1;
 }
 
 void MemoryAllocator::release(unsigned id)
 {
-	Allocation &alloc = get_allocation(id);
-	if(!alloc.memory)
-		throw invalid_operation("MemoryAllocator::release");
+	if(!id || id>blocks.size() || !blocks[id-1].allocated)
+		throw key_error(id);
 
-	const VulkanFunctions &vk = device.get_functions();
+	unsigned block_index = id-1;
+	Block &block = blocks[block_index];
 
-	vk.FreeMemory(alloc.memory);
-}
+	block.allocated = false;
 
-size_t MemoryAllocator::get_allocation_size(unsigned id) const
-{
-	return get_allocation(id).size;
+	Region &region = regions[block.region];
+	if(region.direct)
+	{
+		const VulkanFunctions &vk = device.get_functions();
+
+		vk.FreeMemory(region.memory);
+		region = Region();
+		block = Block();
+		return;
+	}
+
+	Pool &pool = pools[region.pool];
+	pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, block.size), block_index);
+	if((block.prev>=0 && !blocks[block.prev].allocated) || (block.next>=0 && !blocks[block.next].allocated))
+		pool.can_consolidate = true;
 }
 
-void *MemoryAllocator::map(unsigned id, size_t offset, size_t size)
+void *MemoryAllocator::map(unsigned id)
 {
-	Allocation &alloc = get_allocation(id);
-	if(alloc.mapped_address)
-		throw invalid_operation("MemoryAllocator::map");
+	if(!id || id>blocks.size() || !blocks[id-1].allocated)
+		throw key_error(id);
 
-	const VulkanFunctions &vk = device.get_functions();
+	Block &block = blocks[id-1];
+	Region &region = regions[block.region];
+	if(!region.mapped_address)
+	{
+		const VulkanFunctions &vk = device.get_functions();
+		vk.MapMemory(region.memory, 0, region.size, 0, &region.mapped_address);
+	}
 
-	vk.MapMemory(alloc.memory, offset, size, 0, &alloc.mapped_address);
+	++region.map_count;
 
-	return alloc.mapped_address;
+	return static_cast<char *>(region.mapped_address)+block.offset;
 }
 
-void MemoryAllocator::unmap(void *ptr)
+void MemoryAllocator::unmap(unsigned id)
 {
-	auto i = find_member(allocations, ptr, &Allocation::mapped_address);
-	if(i==allocations.end())
-		throw invalid_operation("MemoryAllocator::unmap");
+	if(!id || id>blocks.size() || !blocks[id-1].allocated)
+		throw key_error(id);
 
-	const VulkanFunctions &vk = device.get_functions();
+	Block &block = blocks[id-1];
+	Region &region = regions[block.region];
 
-	vk.UnmapMemory(i->memory);
-	i->mapped_address = 0;
+	if(!regions[block.region].mapped_address)
+		throw invalid_operation("MemoryAllocator::unmap");
invalid_operation("MemoryAllocator::unmap"); + else if(!--region.map_count) + { + const VulkanFunctions &vk = device.get_functions(); + vk.UnmapMemory(region.memory); + region.mapped_address = 0; + } } } // namespace GL