Avoid allocating buffers and images too close together
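
Vulkan's bufferImageGranularity limit forbids binding a buffer (linear) resource and an image (optimal-tiling) resource within the same granularity-sized page of a VkDeviceMemory allocation. To honor it, the allocator now records whether each block holds a buffer or an image, pads an allocation that would land next to a block of the other kind, and places buffers at the end of a free block while images fill it from the front. A minimal sketch of the padding step, with a hypothetical helper name (the real logic is in MemoryAllocator::get_alloc_offset below):

    // Round an offset up to the next bufferImageGranularity boundary so that
    // a buffer and an image never share a granularity page (sketch only).
    static size_t pad_to_granularity(size_t offset, size_t granularity)
    {
        offset += granularity-1;
        return offset-offset%granularity;
    }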
diff --git a/source/backends/vulkan/memoryallocator.cpp b/source/backends/vulkan/memoryallocator.cpp
index 227b54b6595904041cdff1d77c81b6944e0fa3b2..8dbe94eb5858b66d79f5723ecb314f02859b7b79 100644
--- a/source/backends/vulkan/memoryallocator.cpp
+++ b/source/backends/vulkan/memoryallocator.cpp
@@ -1,6 +1,8 @@
 #include <msp/core/algorithm.h>
 #include <msp/core/maputils.h>
 #include <msp/graphics/vulkancontext_platform.h>
+#include <msp/stringcodec/utf8.h>
+#include <msp/strings/format.h>
 #include "device.h"
 #include "error.h"
 #include "memoryallocator.h"
@@ -54,7 +56,7 @@ MemoryAllocator::~MemoryAllocator()
                        vk.FreeMemory(r.memory);
 }
 
-unsigned MemoryAllocator::find_memory_pool(unsigned mask, MemoryType type)
+unsigned MemoryAllocator::find_memory_pool(unsigned mask, MemoryType type) const
 {
        for(unsigned i=0; i<pools.size(); ++i)
                if((mask&(1<<i)) && pools[i].type==type)
@@ -84,14 +86,48 @@ unsigned MemoryAllocator::create_region(unsigned pool_index, size_t size, bool d
        return regions.size()-1;
 }
 
-vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size)
+vector<unsigned>::iterator MemoryAllocator::lower_bound_by_size(vector<unsigned> &indices, size_t size) const
 {
        return lower_bound(indices, size, [this](unsigned j, unsigned s){ return blocks[j].size<s; });
 }
 
-unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType type)
+size_t MemoryAllocator::get_alloc_offset(const Block &block, size_t size, size_t align, BlockType type) const
 {
-       unsigned pool_index = find_memory_pool(type_bits, type);
+       size_t offset = block.offset;
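+       // If the previous block holds a different kind of resource, start at the
+       // next buffer_image_granularity boundary so the two never share a page.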
+       if(type!=block.type && block.prev>=0 && type!=blocks[block.prev].type)
+       {
+               offset += buffer_image_granularity-1;
+               offset -= offset%buffer_image_granularity;
+       }
+
+       offset += align-1;
+       offset -= offset%align;
+
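+       // Place buffers as far towards the end of the block as alignment allows,
+       // so buffers and images consume a free block from opposite ends.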
+       if(type==BUFFER)
+       {
+               size_t offset2 = block.offset+block.size-size;
+               offset2 -= offset2%align;
+               offset = max(offset, offset2);
+       }
+
+       return offset-block.offset;
+}
+
+void MemoryAllocator::update_largest_free(Pool &pool)
+{
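+       // free_blocks is sorted by size, so walking backwards from the end finds
+       // the largest block usable for buffers and the largest usable for images.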
+       for(auto i=pool.free_blocks.end(); ((pool.largest_free_buffer<0 || pool.largest_free_image<0) && i!=pool.free_blocks.begin()); )
+       {
+               --i;
+               if(pool.largest_free_buffer<0 && (blocks[*i].type==BUFFER || blocks[*i].type==UNDECIDED))
+                       pool.largest_free_buffer = *i;
+               if(pool.largest_free_image<0 && (blocks[*i].type==IMAGE || blocks[*i].type==UNDECIDED))
+                       pool.largest_free_image = *i;
+       }
+}
+
+unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits, MemoryType mem_type, BlockType block_type)
+{
+       unsigned pool_index = find_memory_pool(type_bits, mem_type);
        Pool &pool = pools[pool_index];
 
        if(size>=direct_alloc_threshold)
@@ -100,21 +136,26 @@ unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits
                block.region = create_region(pool_index, size, true);
                block.size = size;
                block.allocated = true;
+               block.type = block_type;
 
                blocks.push_back(block);
                return blocks.size()-1;
        }
 
-       if(pool.can_consolidate && blocks[pool.free_blocks.back()].size<size+align)
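+       // If even the largest free block usable for this resource type cannot
+       // hold the allocation, try merging adjacent free blocks first.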
+       int largest_free = (block_type==BUFFER ? pool.largest_free_buffer : pool.largest_free_image);
+       if(pool.can_consolidate && (largest_free<0 || blocks[largest_free].size<size+align))
                consolidate(pool_index);
 
        auto i = lower_bound_by_size(pool.free_blocks, size);
        for(; i!=pool.free_blocks.end(); ++i)
        {
                Block &block = blocks[*i];
-               size_t offset = align-1-(block.offset+align-1)%align;
-               if(offset+size<=block.size)
-                       break;
+               if(block.type==UNDECIDED || block.type==block_type)
+               {
+                       size_t offset = get_alloc_offset(block, size, align, block_type);
+                       if(offset+size<=block.size)
+                               break;
+               }
        }
 
        unsigned block_index;
@@ -124,18 +165,22 @@ unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits
                pool.free_blocks.erase(i);
                if(pool.free_blocks.empty())
                        pool.can_consolidate = false;
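+               // The block being handed out may have been a cached largest free
+               // block; the caches are rebuilt at the end of this function.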
+               if(static_cast<int>(block_index)==pool.largest_free_buffer)
+                       pool.largest_free_buffer = -1;
+               if(static_cast<int>(block_index)==pool.largest_free_image)
+                       pool.largest_free_image = -1;
        }
        else
        {
                Block block;
                block.region = create_region(pool_index, default_region_size, false);
-               block.size = default_region_size;
+               block.size = regions[block.region].size;
 
                blocks.push_back(block);
                block_index = blocks.size()-1;
        }
 
-       size_t offset = align-1-(blocks[block_index].offset+align-1)%align;
+       size_t offset = get_alloc_offset(blocks[block_index], size, align, block_type);
        if(offset)
        {
                unsigned head_index = block_index;
@@ -152,6 +197,9 @@ unsigned MemoryAllocator::allocate(size_t size, size_t align, unsigned type_bits
        }
 
        blocks[block_index].allocated = true;
+       blocks[block_index].type = block_type;
+
+       update_largest_free(pool);
 
        return block_index;
 }
@@ -221,6 +269,10 @@ void MemoryAllocator::consolidate(unsigned pool_index)
                                pool.free_blocks[k] = pool.free_blocks[--i];
                }
        }
+
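+       // Merging free blocks invalidates the cached indices; rebuild them from
+       // the consolidated free list.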
+       pool.largest_free_buffer = -1;
+       pool.largest_free_image = -1;
+       update_largest_free(pool);
 }
 
 void MemoryAllocator::merge_block_with_next(unsigned index)
@@ -243,7 +295,7 @@ unsigned MemoryAllocator::allocate(VkBuffer buffer, MemoryType type)
        VkMemoryRequirements requirements;
        vk.GetBufferMemoryRequirements(buffer, requirements);
 
-       unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type);
+       unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type, BUFFER);
 
        Block &block = blocks[block_index];
        vk.BindBufferMemory(buffer, regions[block.region].memory, block.offset);
@@ -258,7 +310,7 @@ unsigned MemoryAllocator::allocate(VkImage image, MemoryType type)
        VkMemoryRequirements requirements;
        vk.GetImageMemoryRequirements(image, requirements);
 
-       unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type);
+       unsigned block_index = allocate(requirements.size, requirements.alignment, requirements.memoryTypeBits, type, IMAGE);
 
        Block &block = blocks[block_index];
        vk.BindImageMemory(image, regions[block.region].memory, block.offset);
@@ -291,6 +343,17 @@ void MemoryAllocator::release(unsigned id)
        pool.free_blocks.insert(lower_bound_by_size(pool.free_blocks, block.size), block_index);
        if((block.prev>=0 && !blocks[block.prev].allocated) || (block.next>=0 && !blocks[block.next].allocated))
                pool.can_consolidate = true;
+
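+       // A newly released block may become the largest free block of its type.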
+       if(block.type==BUFFER)
+       {
+               if(pool.largest_free_buffer<0 || blocks[pool.largest_free_buffer].size<block.size)
+                       pool.largest_free_buffer = block_index;
+       }
+       else if(block.type==IMAGE)
+       {
+               if(pool.largest_free_image<0 || blocks[pool.largest_free_image].size<block.size)
+                       pool.largest_free_image = block_index;
+       }
 }
 
 void *MemoryAllocator::map(unsigned id)
@@ -329,5 +392,106 @@ void MemoryAllocator::unmap(unsigned id)
        }
 }
 
+string MemoryAllocator::get_debug() const
+{
+       static const StringCodec::unichar bar_chars[] = { 0xB7, 0x2596, 0x258C, 0x2597, 0x2584, 0x2599, 0x2590, 0x259F, 0x2588 };  // ·▖▌▗▄▙▐▟█
+
+       string debug;
+       for(unsigned i=0; i<pools.size(); ++i)
+       {
+               const Pool &pool = pools[i];
+
+               string pool_debug;
+               size_t total_heap = 0;
+               size_t total_used = 0;
+               for(unsigned j=0; j<regions.size(); ++j)
+                       if(regions[j].pool==static_cast<int>(i))
+                       {
+                               total_heap += regions[j].size;
+                               pool_debug += format("  Region %d: %d kB", j, (regions[j].size+512)/1024);
+                               if(regions[j].direct)
+                                       pool_debug += ", direct";
+                               pool_debug += '\n';
+
+                               int block_index = -1;
+                               for(unsigned k=0; (block_index<0 && k<blocks.size()); ++k)
+                                       if(blocks[k].region==static_cast<int>(j) && blocks[k].offset==0)
+                                               block_index = k;
+
+                               unsigned slice_index = 0;
+                               unsigned slice_data = 0;
+
+                               string bar = "    [";
+                               string region_debug;
+                               StringCodec::Utf8::Encoder bar_enc;
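+                               // Each bar glyph covers two slices of the region: a blank column
+                               // is free space, a full column is allocated space, and a lower
+                               // quadrant marks a slice containing both.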
+                               while(block_index>=0)
+                               {
+                                       const Block &block = blocks[block_index];
+                                       if(block.allocated)
+                                               total_used += block.size;
+                                       const char *state_str = (block.allocated ? "allocated" : "free");
+                                       const char *type_str = (block.type==BUFFER ? "buffer" : block.type==IMAGE ? "image" : "undecided");
+                                       region_debug += format("    Block %d: %d bytes at %d, %s %s\n", block_index, block.size, block.offset, state_str, type_str);
+                                       block_index = block.next;
+
+                                       size_t block_end = block.offset+block.size;
+                                       while(1)
+                                       {
+                                               size_t slice_end = regions[j].size*(slice_index+1)/140;
+                                               slice_data |= 1<<(block.allocated+slice_index%2*2);
+                                               if(slice_end>block_end)
+                                                       break;
+                                               ++slice_index;
+                                               if(slice_index%2==0)
+                                               {
+                                                       slice_data = 5+((slice_data>>1)&5)-(slice_data&5);
+                                                       bar_enc.encode_char(bar_chars[(slice_data&3)+3*((slice_data>>2)&3)], bar);
+                                                       slice_data = 0;
+                                               }
+                                       }
+                               }
+
+                               bar += "]\n";
+                               if(!regions[j].direct)
+                                       pool_debug += bar;
+                               pool_debug += region_debug;
+                       }
+
+               if(!pool_debug.empty())
+               {
+                       MemoryType t = pool.type;
+                       const char *type_str = (t==DEVICE_MEMORY ? "device" : t==STAGING_MEMORY ? "staging" :
+                               t==STREAMING_MEMORY ? "streaming" : "unknown");
+                       debug += format("Pool %d: %s, %d/%d kB used\n", i, type_str, (total_used+512)/1024, (total_heap+512)/1024);
+                       debug += pool_debug;
+               }
+
+               if(!pool.free_blocks.empty())
+               {
+                       debug += "  Free blocks:\n";
+                       for(unsigned j: pool.free_blocks)
+                       {
+                               const char *type = (blocks[j].type==BUFFER ? "buffer" : blocks[j].type==IMAGE ? "image" : "undecided");
+                               debug += format("    Block %d: %d bytes, %s", j, blocks[j].size, type);
+                               unsigned largest_flags = (static_cast<int>(j)==pool.largest_free_buffer)+(static_cast<int>(j)==pool.largest_free_image)*2;
+                               if(largest_flags)
+                               {
+                                       debug += " (largest free ";
+                                       if(largest_flags&1)
+                                               debug += "buffer";
+                                       if(largest_flags==3)
+                                               debug += ", ";
+                                       if(largest_flags&2)
+                                               debug += "image";
+                                       debug += ')';
+                               }
+                               debug += '\n';
+                       }
+               }
+       }
+
+       return debug;
+}
+
 } // namespace GL
 } // namespace Msp