#include <msp/core/algorithm.h>
#include <msp/core/maputils.h>
#include <msp/graphics/vulkancontext_platform.h>
#include "device.h"
#include "error.h"
#include "memoryallocator.h"
#include "vulkan.h"

using namespace std;

namespace Msp {
namespace GL {
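
/* Allocates device memory in large regions and suballocates them into
   blocks. Each Vulkan memory type gets its own pool, whose free blocks are
   kept sorted by size for best-fit searches. Requests above a threshold
   bypass the pools and receive a dedicated region. */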
MemoryAllocator::MemoryAllocator(Device &d):
    device(d),
    phys_device(handle_cast<VkPhysicalDevice>(device.get_context().get_private().physical_device))
{
    const VulkanFunctions &vk = device.get_functions();

    VkPhysicalDeviceMemoryProperties mem_props;
    vk.GetPhysicalDeviceMemoryProperties(mem_props);

    for(unsigned i=0; i<mem_props.memoryHeapCount; ++i)
        if(mem_props.memoryHeaps[i].flags&VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
            total_device_memory += mem_props.memoryHeaps[i].size;
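
    /* Size pooled regions at 1/256th of total device-local memory, rounded
       down to the allocator's minimum alignment; anything at least a quarter
       of a region's size is allocated directly instead of suballocated. */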
    default_region_size = total_device_memory/256;
    default_region_size -= default_region_size%min_alignment;
    direct_alloc_threshold = default_region_size/4;
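
    /* Classify each memory type by its property flags: device-local and
       host-visible is streaming memory, device-local only is device memory,
       and host-visible only is staging memory. */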
    const VkMemoryPropertyFlags host_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT|VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    pools.resize(mem_props.memoryTypeCount);
    for(unsigned i=0; i<mem_props.memoryTypeCount; ++i)
    {
        VkMemoryPropertyFlags flags = mem_props.memoryTypes[i].propertyFlags;
        if(flags&VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
        {
            if((flags&host_flags)==host_flags)
                pools[i].type = STREAMING_MEMORY;
            else
                pools[i].type = DEVICE_MEMORY;
        }
        else if((flags&host_flags)==host_flags)
            pools[i].type = STAGING_MEMORY;
    }
}
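
// Free all remaining memory regions when the allocator is destroyed.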
MemoryAllocator::~MemoryAllocator()
{
    const VulkanFunctions &vk = device.get_functions();

    for(Region &r: regions)
        if(r.memory)  // directly allocated regions may have been freed already
            vk.FreeMemory(r.memory);
}
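
/* Picks a pool whose memory type is acceptable per the type mask and matches
   the requested kind of memory. Streaming memory is both device-local and
   host-visible, so it can substitute for either more specialized kind. */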
unsigned MemoryAllocator::find_memory_pool(unsigned mask, MemoryType type)
{
    for(unsigned i=0; i<pools.size(); ++i)
        if((mask&(1<<i)) && pools[i].type==type)
            return i;
    if(type==DEVICE_MEMORY || type==STAGING_MEMORY)
        return find_memory_pool(mask, STREAMING_MEMORY);
    throw runtime_error("Unable to find suitable memory type");
}
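
/* Allocates a new VkDeviceMemory region from the given pool's memory type.
   Direct regions back a single oversized allocation; pooled regions are
   suballocated into blocks. */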
unsigned MemoryAllocator::create_region(unsigned pool_index, size_t size, bool direct)
{
    const VulkanFunctions &vk = device.get_functions();

    VkMemoryAllocateInfo alloc_info = { };
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = size;
    alloc_info.memoryTypeIndex = pool_index;

    Region region;
    vk.AllocateMemory(alloc_info, region.memory);

    region.pool = pool_index;
    region.direct = direct;
    region.size = size;
    regions.push_back(region);

    return regions.size()-1;
}
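
/* Free block lists are kept sorted by ascending block size, so a binary
   search finds the first block that could satisfy a request. */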
vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size)
{
    return lower_bound(indices, size, [this](unsigned j, unsigned s){ return blocks[j].size<s; });
}
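
/* The core allocation routine: find a suitable pool, then either hand out a
   dedicated region (large requests) or carve a block out of the pool's free
   list, splitting off the misaligned head and any excess tail. */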
unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType type)
{
    unsigned pool_index = find_memory_pool(type_bits, type);
    Pool &pool = pools[pool_index];

    if(size>=direct_alloc_threshold)
    {
        // Large allocations get their own region and bypass the free lists.
        Block block;
        block.region = create_region(pool_index, size, true);
        block.size = size;
        block.allocated = true;
        blocks.push_back(block);
        return blocks.size()-1;
    }
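
    /* If even the largest free block is too small, merging adjacent free
       blocks may produce one that fits. Consolidation is deferred until
       needed to avoid repeated linear passes. */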
    if(pool.can_consolidate && blocks[pool.free_blocks.back()].size<size+align)
        consolidate(pool_index);

    // Best-fit search: start at the first block big enough for the raw size
    // and continue until one also accommodates the alignment padding.
    auto i = lower_bound_by_size(pool.free_blocks, size);
    for(; i!=pool.free_blocks.end(); ++i)
    {
        Block &block = blocks[*i];
        size_t offset = align-1-(block.offset+align-1)%align;
        if(offset+size<=block.size)
            break;
    }
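
    // Use the block that was found, or extend the pool with a fresh region.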
    unsigned block_index;
    if(i!=pool.free_blocks.end())
    {
        block_index = *i;
        pool.free_blocks.erase(i);
        if(pool.free_blocks.empty())
            pool.can_consolidate = false;
    }
    else
    {
        Block block;
        block.region = create_region(pool_index, default_region_size, false);
        block.size = default_region_size;
        blocks.push_back(block);
        block_index = blocks.size()-1;
    }
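
    /* If the block's start is misaligned, split off the unusable head and
       return it to the free list. The expression computes the distance from
       block.offset up to the next multiple of align. */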
    size_t offset = align-1-(blocks[block_index].offset+align-1)%align;
    if(offset)
    {
        unsigned head_index = block_index;
        block_index = split_block(block_index, offset);
        pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[head_index].size), head_index);
    }
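
    /* Round the size up to the allocator's granularity and, if enough space
       remains, split the excess off the end as a new free block. */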
    size += min_alignment-1;
    size -= size%min_alignment;
    if(blocks[block_index].size>=size+min_alignment)
    {
        unsigned tail_index = split_block(block_index, size);
        pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, blocks[tail_index].size), tail_index);
    }

    blocks[block_index].allocated = true;

    return block_index;
}
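
/* Splits a block in two, keeping the first head_size bytes in the original
   block and moving the remainder into a new tail block linked after it.
   Returns the index of the tail. */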
unsigned MemoryAllocator::split_block(unsigned index, size_t head_size)
{
    // Extend the vector first so both references stay valid.
    blocks.emplace_back();
    Block &block = blocks[index];
    Block &tail = blocks.back();
    unsigned tail_index = blocks.size()-1;

    tail.region = block.region;
    tail.offset = block.offset+head_size;
    tail.size = block.size-head_size;
    tail.prev = index;
    tail.next = block.next;
    if(tail.next>=0)
        blocks[tail.next].prev = tail_index;  // keep the neighbor's back link consistent

    block.size = head_size;
    block.next = tail_index;

    return tail_index;
}
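
/* Merges runs of adjacent free blocks within each region. The free list is
   rebuilt in place: surviving entries are compacted to the front, merged
   run heads are collected separately in size order, and the two sorted
   sequences are then merged from the back. */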
void MemoryAllocator::consolidate(unsigned pool_index)
{
    Pool &pool = pools[pool_index];

    vector<unsigned> merged_blocks;
    unsigned i = 0;
    for(unsigned j=0; j<pool.free_blocks.size(); ++j)
    {
        unsigned block_index = pool.free_blocks[j];
        Block &block = blocks[block_index];

        if(block.prev<0 || blocks[block.prev].allocated)
        {
            if(block.next>=0 && !blocks[block.next].allocated)
            {
                merge_block_with_next(block_index);

                while(block.next>=0 && !blocks[block.next].allocated)
                    merge_block_with_next(block_index);

                merged_blocks.insert(lower_bound_by_size(merged_blocks, block.size), block_index);
                continue;
            }
        }
        else
            // A free predecessor will absorb this block; drop it from the list.
            continue;
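
        // Blocks that were not merged away are compacted towards the front.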
        if(i!=j)
            pool.free_blocks[i] = block_index;
        ++i;
    }

    pool.free_blocks.resize(i+merged_blocks.size());

    /* Merge the compacted survivors (the first i entries) with the sorted
       merged blocks, filling the list from the largest end downwards. */
    if(!merged_blocks.empty())
    {
        unsigned j = merged_blocks.size();
        for(unsigned k=pool.free_blocks.size()-1; j; --k)
        {
            if(!i || blocks[merged_blocks[j-1]].size>blocks[pool.free_blocks[i-1]].size)
                pool.free_blocks[k] = merged_blocks[--j];
            else
                pool.free_blocks[k] = pool.free_blocks[--i];
        }
    }
}
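
/* Folds the next block into this one and unlinks it. The absorbed block's
   entry stays in the blocks vector but is reset to an empty state so it
   cannot be mistaken for a live block. */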
void MemoryAllocator::merge_block_with_next(unsigned index)
{
    Block &block = blocks[index];

    Block &next = blocks[block.next];
    block.size += next.size;
    block.next = next.next;
    if(block.next>=0)
        blocks[block.next].prev = index;

    next = Block();
}
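
/* Public entry points: query the resource's memory requirements, allocate a
   matching block and bind the memory to the resource. Returned ids are block
   indices offset by one, leaving 0 free as a null id. */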
unsigned MemoryAllocator::allocate(VkBuffer buffer, MemoryType type)
{
    const VulkanFunctions &vk = device.get_functions();

    VkMemoryRequirements requirements;
    vk.GetBufferMemoryRequirements(buffer, requirements);

    unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type);

    Block &block = blocks[block_index];
    vk.BindBufferMemory(buffer, regions[block.region].memory, block.offset);

    return block_index+1;
}
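
// Image allocation mirrors the buffer path with the image-specific queries.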
unsigned MemoryAllocator::allocate(VkImage image, MemoryType type)
{
    const VulkanFunctions &vk = device.get_functions();

    VkMemoryRequirements requirements;
    vk.GetImageMemoryRequirements(image, requirements);

    unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type);

    Block &block = blocks[block_index];
    vk.BindImageMemory(image, regions[block.region].memory, block.offset);

    return block_index+1;
}
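
/* Returns a block to its pool's free list, or frees the whole region if it
   was a direct allocation. Consolidation is only flagged here and performed
   lazily by allocate(). */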
void MemoryAllocator::release(unsigned id)
{
    if(!id || id>blocks.size() || !blocks[id-1].allocated)
        throw key_error(id);

    unsigned block_index = id-1;
    Block &block = blocks[block_index];

    block.allocated = false;

    Region &region = regions[block.region];
    if(region.direct)
    {
        const VulkanFunctions &vk = device.get_functions();

        vk.FreeMemory(region.memory);
        region = Region();
        return;
    }

    Pool &pool = pools[region.pool];
    pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, block.size), block_index);
    if((block.prev>=0 && !blocks[block.prev].allocated) || (block.next>=0 && !blocks[block.next].allocated))
        pool.can_consolidate = true;
}
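
/* Maps the block's region into host address space on first use and returns a
   pointer at the block's own offset within it. Mappings are shared per
   region and reference counted. */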
void *MemoryAllocator::map(unsigned id)
{
    if(!id || id>blocks.size() || !blocks[id-1].allocated)
        throw key_error(id);

    Block &block = blocks[id-1];
    Region &region = regions[block.region];
    if(!region.mapped_address)
    {
        const VulkanFunctions &vk = device.get_functions();
        vk.MapMemory(region.memory, 0, region.size, 0, &region.mapped_address);
    }

    ++region.map_count;

    return static_cast<char *>(region.mapped_address)+block.offset;
}
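
/* Drops one reference to the region's mapping and unmaps it when the last
   user is gone. */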
void MemoryAllocator::unmap(unsigned id)
{
    if(!id || id>blocks.size() || !blocks[id-1].allocated)
        throw key_error(id);

    Block &block = blocks[id-1];
    Region &region = regions[block.region];

    if(!region.mapped_address)
        throw invalid_operation("MemoryAllocator::unmap");
    else if(!--region.map_count)
    {
        const VulkanFunctions &vk = device.get_functions();
        vk.UnmapMemory(region.memory);
        region.mapped_address = 0;
    }
}

} // namespace GL
} // namespace Msp