#include "PoolResource.h"

#include "BitUtil.h"
#include "Util.h"

/*
 * Free list layout:
 * let k = log2(MIN_BLOCK_SIZE)
 *
 * | n | -> [2^{k+n}..]
 * | . |
 * | 2 | -> [2^{k+2}..2^{k+3} - 1]
 * | 1 | -> [2^{k+1}..2^{k+2} - 1]
 * | 0 | -> [2^k    ..2^{k+1} - 1]
 *
 * Constant time allocation strategy:
 * - allocate from _freeLists[ceil(log2(size)) - k]
 * - guaranteed fit
 * - downside: more fragmentation is to be expected, e.g. when freeing
 *   and then allocating a block of equal size and alignment, the block
 *   that was just freed is potentially not considered. Larger blocks
 *   may become progressively more fragmented over time.
 *
 * First fit allocation strategy:
 * - allocate from _freeLists[floor(log2(size)) - k]
 * - linearly search this list for the first sufficiently large block
 *
 * For all strategies:
 * - insert into _freeLists[floor(log2(size)) - k]
 * - allocate from a larger free list if the originally chosen one is
 *   empty or cannot service the allocation
 */

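/*
 * Worked example of the bucket mapping (illustrative; assume k ==
 * MIN_BLOCK_SIZE_LOG == 8, i.e. a 256-byte minimum block size):
 *
 *   size = 4096 = 2^12: floor(log2) = ceil(log2) = 12 -> list 12 - 8 = 4
 *   size = 4097:        floor(log2) = 12 -> inserted into list 4
 *                       ceil(log2)  = 13 -> constant time fit allocates from list 5
 *
 * Every block in list 5 holds at least 2^13 bytes, so a block taken from it is
 * guaranteed to fit a 4097-byte request without any searching; the price is
 * that a fitting 4097-byte block sitting in list 4 is never considered.
 */
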
namespace elsa::mr
{
    namespace pool_resource
    {
        template <typename FreeListStrategy>
        PoolResource<FreeListStrategy>::PoolResource(MemoryResource upstream,
                                                     PoolResourceConfig config)
            : _upstream{upstream}, _config{config}
        {
            if (!config.validate()) {
                _config = PoolResourceConfig::defaultConfig();
            }
            _freeLists.resize(detail::log2Floor(_config.maxChunkSize)
                                  - pool_resource::MIN_BLOCK_SIZE_LOG + 1,
                              nullptr);
            _cachedChunks =
                std::make_unique<std::unique_ptr<pool_resource::Block>[]>(_config.maxCachedChunks);
        }

        template <typename FreeListStrategy>
        PoolResource<FreeListStrategy>::~PoolResource()
        {
            for (size_t i = 0; i < _cachedChunkCount; i++) {
                _upstream->deallocate(_cachedChunks[i]->_address, _cachedChunks[i]->_chunkSize,
                                      pool_resource::BLOCK_GRANULARITY);
            }
        }

        template <typename FreeListStrategy>
        MemoryResource PoolResource<FreeListStrategy>::make(MemoryResource upstream,
                                                            PoolResourceConfig config)
        {
            return std::shared_ptr<MemResInterface>(new PoolResource(upstream, config),
                                                    [](PoolResource* p) { delete p; });
        }

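        /*
         * Usage sketch (illustrative; "upstream" stands for whatever memory
         * resource the caller already has):
         *
         *   MemoryResource pool = PoolResource<ConstantTimeFit>::make(
         *       upstream, PoolResourceConfig::defaultConfig());
         *   void* p = pool->allocate(1024, 16);
         *   pool->deallocate(p, 1024, 16);
         *
         * Ownership is handed to a std::shared_ptr with an explicit deleter,
         * presumably because construction and destruction are not public and
         * instances are meant to be created through make() only.
         */
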
        template <typename FreeListStrategy>
        void* PoolResource<FreeListStrategy>::allocate(size_t size, size_t alignment)
        {
            auto [realSize, blockSize] =
                util::computeSizeWithAlignment(size, alignment, pool_resource::BLOCK_GRANULARITY);
            if (blockSize > _config.maxChunkSize) {
                // allocation too big to be handled by the pool
                return _upstream->allocate(size, alignment);
            }

            pool_resource::Block* block =
                FreeListStrategy::selectBlock(_freeListNonEmpty, _freeLists, blockSize);
            if (!block) {
                // this block *must* fit the allocation; any request too large for the
                // pool was already forwarded to _upstream above
                block = expandPool(blockSize);
            } else {
                unlinkFreeBlock(block);
            }
            // by this point, block is a registered, but unlinked block
            ASSERT(block
                   && detail::checkAlignment(block->_address, pool_resource::BLOCK_GRANULARITY)
                   && block->size() >= realSize);

            void* retAddress = detail::alignUp(block->_address, alignment);
            size_t headSplitSize = reinterpret_cast<uintptr_t>(retAddress)
                                   - reinterpret_cast<uintptr_t>(block->_address);
            size_t remainingSize = block->size() - headSplitSize;

            if (headSplitSize != 0) {
                // the alignment forces a gap before retAddress; keep the gap as the
                // (free) head block and register a new block for the allocation
                pool_resource::Block* newBlock;
                try {
                    auto newBlockOwned = std::make_unique<pool_resource::Block>();
                    newBlock = newBlockOwned.get();
                    _addressToBlock.insert({retAddress, std::move(newBlockOwned)});
                } catch (...) {
                    // make sure the failed allocation has no side effects (aside from
                    // potentially enlarging the pool)
                    linkFreeBlock(block);
                    throw std::bad_alloc();
                }

                block->setSize(headSplitSize);
                // block is already in the map, but must be reinserted into an
                // appropriate free list
                linkFreeBlock(block);

                // size and free bit are set below
                newBlock->markPrevFree();
                newBlock->_address = retAddress;
                newBlock->_prevAddress = block->_address;
                block = newBlock;
            }

            block->markAllocated();
            block->setSize(remainingSize);
            try {
                shrinkBlockAtTail(*block, retAddress, realSize, remainingSize);
                return retAddress;
            } catch (...) {
                // safe to call: doDeallocate is noexcept and frees block, re-merging
                // it with its predecessor
                doDeallocate(block->_address);
                throw std::bad_alloc();
            }
        }

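        /*
         * Worked example of the head split above (illustrative): a free block at
         * _address = 0x1100 is selected for allocate(512, 1024). retAddress =
         * alignUp(0x1100, 1024) = 0x1400, so headSplitSize = 0x300. The original
         * block keeps [0x1100, 0x1400) and goes back onto a free list, while a new
         * block is registered at 0x1400 for the allocation itself. Any space beyond
         * realSize is then returned to a free list by shrinkBlockAtTail().
         */
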
        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::deallocate(void* ptr, size_t size,
                                                        size_t alignment) noexcept
        {
            if (!ptr) {
                return;
            }
            // computeSizeWithAlignment throws if alignment is not a power of two.
            // Due to the noexcept tag, this leads to termination. This behavior
            // seems appropriate, since such a call is essentially an invalid free:
            // the provided pointer cannot possibly have been allocated with that
            // alignment.
            auto [_, blockSize] =
                util::computeSizeWithAlignment(size, alignment, pool_resource::BLOCK_GRANULARITY);
            if (blockSize > _config.maxChunkSize) {
                // allocation too big to be handled by the pool, must have come from upstream
                _upstream->deallocate(ptr, size, alignment);
                return;
            }
            doDeallocate(ptr);
        }

        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::doDeallocate(void* ptr) noexcept
        {
            auto blockIt = _addressToBlock.find(ptr);
            ASSERT(blockIt != _addressToBlock.end());
            pool_resource::Block* block = blockIt->second.get();
            block->markFree();

            if (!block->isChunkStart()) {
                auto prevIt = _addressToBlock.find(block->_prevAddress);
                if (prevIt != _addressToBlock.end() && prevIt->second->isFree()) {
                    // coalesce with the previous block
                    pool_resource::Block* prev = prevIt->second.get();
                    unlinkFreeBlock(prev);
                    prev->setSize(prev->size() + block->size());
                    _addressToBlock.erase(blockIt);
                    block = prev;
                }
            }

            void* nextAddress = detail::voidPtrOffset(block->_address, block->size());
            auto nextIt = _addressToBlock.find(nextAddress);
            if (nextIt != _addressToBlock.end() && nextIt->second->isFree()
                && !nextIt->second->isChunkStart()) { // never coalesce across chunk boundaries
                // coalesce with the next block
                pool_resource::Block* next = nextIt->second.get();
                unlinkFreeBlock(next);
                block->setSize(block->size() + next->size());
                _addressToBlock.erase(nextIt);
            }

            nextAddress = detail::voidPtrOffset(block->_address, block->size());
            nextIt = _addressToBlock.find(nextAddress);
            if (nextIt != _addressToBlock.end() && !nextIt->second->isChunkStart()) {
                std::unique_ptr<pool_resource::Block>& next = nextIt->second;
                next->markPrevFree();
                next->_prevAddress = block->_address;
            }
            if (block->isChunkStart() && block->size() == block->_chunkSize) {
                // the chunk is completely free; return it to the chunk cache or upstream
                auto chunkIt = _addressToBlock.find(block->_address);
                shrinkPool(std::move(chunkIt->second));
                _addressToBlock.erase(chunkIt);
            } else {
                linkFreeBlock(block);
            }
        }

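        /*
         * Coalescing example (illustrative): a chunk holds three consecutive blocks
         * A [0x0000, 0x0400), B [0x0400, 0x0800), C [0x0800, 0x1000), with A and C
         * free. Freeing B first merges it with A (the predecessor), then the merged
         * block [0x0000, 0x0800) absorbs C, leaving one free block over the whole
         * range. Only then is the result linked into a free list; if it spans the
         * entire chunk, it is handed to shrinkPool() instead.
         */
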
        template <typename FreeListStrategy>
        bool PoolResource<FreeListStrategy>::tryResize(void* ptr, size_t size, size_t alignment,
                                                       size_t newSize) noexcept
        {
            static_cast<void>(alignment);
            if (!ptr || size > _config.maxChunkSize) {
                return false;
            }
            auto blockIt = _addressToBlock.find(ptr);
            ASSERT(blockIt != _addressToBlock.end());
            std::unique_ptr<pool_resource::Block>& block = blockIt->second;

            size_t realSize = util::computeRealSize(newSize, pool_resource::BLOCK_GRANULARITY);
            size_t currentSize = block->size();
            if (realSize == currentSize) {
                return true;
            } else if (realSize > currentSize) {
                // try to grow in place by absorbing the next block, if it is a free
                // block in the same chunk (chunk-start blocks have no predecessor)
                void* nextAddress = detail::voidPtrOffset(ptr, currentSize);
                auto nextIt = _addressToBlock.find(nextAddress);
                if (nextIt != _addressToBlock.end() && nextIt->second->isFree()
                    && nextIt->second->_prevAddress != nullptr) {
                    size_t cumulativeSize = currentSize + nextIt->second->size();
                    if (cumulativeSize >= realSize) {
                        unlinkFreeBlock(nextIt->second.get());
                        std::unique_ptr<pool_resource::Block> next = std::move(nextIt->second);
                        try {
                            _addressToBlock.erase(nextIt);
                        } catch (...) {
                            // erase should never be able to throw here, so if this is
                            // reached we are in dire straits
                            ASSERT(0);
                        }
                        block->setSize(realSize);
                        if (cumulativeSize > realSize) {
                            next->_address = detail::voidPtrOffset(ptr, realSize);
                            next->setSize(cumulativeSize - realSize);
                            try {
                                insertFreeBlock(std::move(next));
                            } catch (...) {
                                // In case we cannot insert the new free block into the
                                // map/free-list, have block subsume it. This may cause
                                // some very undesirable internal fragmentation, but it
                                // is better than leaking this memory.
                                block->setSize(cumulativeSize);
                            }
                        }
                        return true;
                    } else {
                        return false;
                    }
                } else {
                    return false;
                }
            } else {
                try {
                    shrinkBlockAtTail(*block, ptr, realSize, currentSize);
                } catch (...) {
                    return false;
                }
                return true;
            }
        }

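        /*
         * Resize example (illustrative; assuming computeRealSize rounds up to the
         * 256-byte granularity): a block at ptr holds 512 bytes and
         * tryResize(ptr, 512, ..., 1000) is requested, so realSize = 1024. If the
         * neighboring block is free with 768 bytes, cumulativeSize = 1280 >= 1024:
         * the block grows in place to 1024 bytes and the remaining 256 bytes are
         * reinserted as a smaller free block. No data is copied, which is the whole
         * point of resizing over allocate-copy-free.
         */
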
        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::insertFreeBlock(
            std::unique_ptr<pool_resource::Block>&& block)
        {
            // if insert throws, this is side-effect free
            auto [it, _] = _addressToBlock.insert({block->_address, std::move(block)});
            linkFreeBlock(it->second.get());
        }

        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::linkFreeBlock(pool_resource::Block* block)
        {
            size_t size = block->size();
            ASSERT(detail::checkAlignment(block->_address, pool_resource::BLOCK_GRANULARITY)
                   && size % pool_resource::BLOCK_GRANULARITY == 0);
            size_t freeListIndex = freeListIndexForFreeChunk(size);
            block->insertAfterFree(&_freeLists[freeListIndex]);
            // use a 64-bit shift; the occupancy mask is a uint64_t
            _freeListNonEmpty |= uint64_t{1} << freeListIndex;
        }

        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::unlinkFreeBlock(pool_resource::Block* block)
        {
            block->unlinkFree();
            size_t freeListIndex = freeListIndexForFreeChunk(block->size());
            // if the free list is now empty, mark it in the bitmask
            if (_freeLists[freeListIndex] == nullptr) {
                _freeListNonEmpty &= ~(uint64_t{1} << freeListIndex);
            }
        }

        template <typename FreeListStrategy>
        size_t PoolResource<FreeListStrategy>::freeListIndexForFreeChunk(size_t size)
        {
            // The largest power of 2 that fits into size determines the free list.
            // As a result, allocations are sometimes served from a larger free list,
            // even if a smaller one would fit. However, this categorization saves us
            // from having to iterate through free lists, looking for the best fit.
            uint64_t freeListLog = detail::log2Floor(size);
            size_t freeListIndex = freeListLog - pool_resource::MIN_BLOCK_SIZE_LOG;
            // all blocks larger than the size for the largest free list are stored there as well
            freeListIndex = std::min(freeListIndex, _freeLists.size() - 1);
            return freeListIndex;
        }

        template <typename FreeListStrategy>
        pool_resource::Block* PoolResource<FreeListStrategy>::expandPool(size_t requestedSize)
        {
            void* newChunkAddress;
            std::unique_ptr<pool_resource::Block> newChunk = nullptr;
            // prefer serving the request from a cached chunk
            for (size_t i = 0; i < _cachedChunkCount; i++) {
                if (_cachedChunks[i]->size() >= requestedSize) {
                    --_cachedChunkCount;
                    std::swap(_cachedChunks[i], _cachedChunks[_cachedChunkCount]);
                    newChunk = std::move(_cachedChunks[_cachedChunkCount]);
                    newChunkAddress = newChunk->_address;
                    break;
                }
            }

            if (!newChunk) {
                // This should be enforced in allocate, by forwarding to _upstream
                ASSERT(requestedSize <= _config.maxChunkSize);
                // Rationale for the chunk size: if a chunk of this size is requested,
                // another chunk of similar size will likely be requested soon. With this
                // choice of chunkSize, 4 such allocations can be served without allocating
                // from _upstream again
                size_t chunkSize =
                    std::min(std::max(requestedSize << 2, _config.chunkSize), _config.maxChunkSize);
                chunkSize = detail::alignUp(chunkSize, pool_resource::BLOCK_GRANULARITY);
                // may throw std::bad_alloc
                newChunkAddress = _upstream->allocate(chunkSize, pool_resource::BLOCK_GRANULARITY);
                newChunk = std::make_unique<pool_resource::Block>();
                newChunk->_address = newChunkAddress;
                newChunk->_size =
                    chunkSize | pool_resource::FREE_BIT | pool_resource::CHUNK_START_BIT;
                newChunk->_chunkSize = chunkSize;
            }

            // read the chunk size before the insert below moves newChunk away;
            // dereferencing the moved-from pointer in the catch block would be UB
            size_t newChunkSize = newChunk->_chunkSize;
            try {
                auto [it, _] = _addressToBlock.insert({newChunkAddress, std::move(newChunk)});
                return it->second.get();
            } catch (...) {
                _upstream->deallocate(newChunkAddress, newChunkSize,
                                      pool_resource::BLOCK_GRANULARITY);
                throw;
            }
        }

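        /*
         * Chunk sizing example (illustrative; assume config.chunkSize == 1 MiB and
         * config.maxChunkSize == 16 MiB, purely for the arithmetic): a 3 MiB
         * request that misses the cache yields
         * chunkSize = min(max(3 MiB << 2, 1 MiB), 16 MiB) = 12 MiB, so the request
         * and three more like it fit in one chunk before _upstream is hit again.
         * A 64 KiB request yields min(max(256 KiB, 1 MiB), 16 MiB) = 1 MiB, the
         * configured default.
         */
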
        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::shrinkPool(std::unique_ptr<pool_resource::Block> chunk)
        {
            if (_cachedChunkCount < _config.maxCachedChunks) {
                _cachedChunks[_cachedChunkCount++] = std::move(chunk);
            } else {
                _upstream->deallocate(chunk->_address, chunk->_chunkSize,
                                      pool_resource::BLOCK_GRANULARITY);
            }
        }

        template <typename FreeListStrategy>
        void PoolResource<FreeListStrategy>::shrinkBlockAtTail(pool_resource::Block& block,
                                                               void* blockAddress, size_t newSize,
                                                               size_t oldSize)
        {
            // oldSize and newSize must be multiples of pool_resource::BLOCK_GRANULARITY
            size_t tailSplitSize = oldSize - newSize;
            if (tailSplitSize != 0) {
                void* subblockAddress = detail::voidPtrOffset(blockAddress, newSize);
                // insert the sub-block after the allocation into a free list; this may
                // propagate std::bad_alloc, in which case block is left unchanged
                auto subblock = std::make_unique<pool_resource::Block>();
                subblock->_size = tailSplitSize | pool_resource::FREE_BIT;
                subblock->_address = subblockAddress;
                subblock->_prevAddress = blockAddress;
                insertFreeBlock(std::move(subblock));

                block.setSize(newSize);
                // Current state:
                //      .---_prevAddress----.
                //      v                   |
                // +---------+---------+-----------+
                // | A       | A'      | B         |
                // +---------+---------+-----------+
                // Where A is the original block, A' is the subblock created in
                // its tail, and B is the successor.
                // So, the successor's (B) _prevAddress must be adjusted
                // (if there is one and we are not at the end of the chunk)
                void* successorAddress = detail::voidPtrOffset(blockAddress, oldSize);
                auto successorIt = _addressToBlock.find(successorAddress);
                if (successorIt != _addressToBlock.end() && !successorIt->second->isChunkStart()) {
                    successorIt->second->_prevAddress = subblockAddress;
                }
            }
        }

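        /*
         * Example of the successor fix-up above (illustrative): A spans
         * [0x0000, 0x0800) and is shrunk to 0x400 bytes. A' is created at 0x0400
         * with 0x400 bytes, and B at 0x0800 must now record _prevAddress = 0x0400
         * instead of 0x0000; otherwise a later free of B would try to coalesce with
         * A (which is allocated) rather than with the free sub-block A'.
         */
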
        void Block::markFree()
        {
            _size |= FREE_BIT;
        }

        void Block::markAllocated()
        {
            _size &= ~FREE_BIT;
        }

        void Block::markChunkStart()
        {
            _size |= CHUNK_START_BIT;
        }

        void Block::markPrevFree()
        {
            _size |= PREV_FREE_BIT;
        }

        void Block::markPrevAllocated()
        {
            _size &= ~PREV_FREE_BIT;
        }

        bool Block::isFree()
        {
            return _size & FREE_BIT;
        }

        bool Block::isPrevFree()
        {
            return _size & PREV_FREE_BIT;
        }

        bool Block::isChunkStart()
        {
            return _size & CHUNK_START_BIT;
        }

        void Block::unlinkFree()
        {
            // _pprevFree points at the predecessor's _nextFree pointer (or at the
            // free list head), so a block can unlink itself without any traversal
            *_pprevFree = _nextFree;
            if (_nextFree) {
                _nextFree->_pprevFree = _pprevFree;
            }
        }

        void Block::insertAfterFree(Block** pprev)
        {
            _pprevFree = pprev;
            _nextFree = *pprev;
            *pprev = this;
            if (_nextFree) {
                _nextFree->_pprevFree = &_nextFree;
            }
        }

        size_t Block::size()
        {
            return _size & SIZE_MASK;
        }

        void Block::setSize(size_t size)
        {
            ASSERT((size & BITFIELD_MASK) == 0);
            _size = (_size & BITFIELD_MASK) | size;
        }

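        /*
         * _size doubles as a bitfield (illustrative; the actual bit assignments
         * live in the header): a valid block size is a multiple of
         * BLOCK_GRANULARITY, so its low bits are always zero and FREE_BIT,
         * CHUNK_START_BIT and PREV_FREE_BIT can be stored there. E.g. if
         * FREE_BIT == 0x1, then _size = 0x1001 encodes a free 0x1000-byte block;
         * size() masks the flags off via SIZE_MASK and setSize() preserves them
         * via BITFIELD_MASK.
         */
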
        pool_resource::Block*
            ConstantTimeFit::selectBlock(uint64_t listOccupancy,
                                         const std::vector<pool_resource::Block*>& freeLists,
                                         size_t blockSize)
        {
            size_t logBlockSize = detail::log2Ceil(blockSize);
            size_t minFreeListIndex = logBlockSize - pool_resource::MIN_BLOCK_SIZE_LOG;
            // mask away the occupancy bits of all lists that are too small
            uint64_t matchingFreeListMask = ~((uint64_t{1} << minFreeListIndex) - 1);
            uint64_t freeListIndex = detail::lowestSetBit(listOccupancy & matchingFreeListMask) - 1;
            if (freeListIndex == std::numeric_limits<uint64_t>::max()) {
                return nullptr;
            } else {
                return freeLists[freeListIndex];
            }
        }

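        /*
         * Selection example (illustrative): with listOccupancy = 0b101001 (lists 0,
         * 3 and 5 non-empty) and minFreeListIndex = 2, the mask ~0b11 keeps lists
         * >= 2, so listOccupancy & mask = 0b101000 and lowestSetBit() identifies
         * list 3. If the masked occupancy is zero, lowestSetBit() is assumed to
         * return 0 (consistent with the "- 1" and the max() check above), the
         * subtraction wraps to UINT64_MAX, and the caller receives nullptr,
         * triggering expandPool().
         */
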
        pool_resource::Block*
            FirstFit::selectBlock(uint64_t listOccupancy,
                                  const std::vector<pool_resource::Block*>& freeLists,
                                  size_t blockSize)
        {
            size_t logBlockSize = detail::log2Floor(blockSize);
            size_t minFreeListIndex = logBlockSize - pool_resource::MIN_BLOCK_SIZE_LOG;
            uint64_t matchingFreeListMask = ~((uint64_t{1} << minFreeListIndex) - 1);
            uint64_t freeListIndex = detail::lowestSetBit(listOccupancy & matchingFreeListMask) - 1;
            if (freeListIndex == std::numeric_limits<uint64_t>::max()) {
                return nullptr;
            } else if (freeListIndex == minFreeListIndex) {
                // first fit search: the floor-based list may contain blocks smaller
                // than blockSize, so scan it linearly
                for (pool_resource::Block* block = freeLists[freeListIndex]; block;
                     block = block->_nextFree) {
                    if (block->size() >= blockSize) {
                        return block;
                    }
                }
                // nothing in this list fits; fall back to the next larger non-empty
                // list, where any block is large enough
                matchingFreeListMask &= ~(uint64_t{1} << freeListIndex);
                freeListIndex = detail::lowestSetBit(listOccupancy & matchingFreeListMask) - 1;
                if (freeListIndex == std::numeric_limits<uint64_t>::max()) {
                    return nullptr;
                } else {
                    return freeLists[freeListIndex];
                }
            } else {
                return freeLists[freeListIndex];
            }
        }

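        /*
         * Why the linear scan is needed (illustrative): blockSize = 3072 has
         * floor(log2) = 11, so the search starts in the [2^11, 2^12) list, which
         * may hold blocks of 2048..4095 bytes. A 2304-byte block in that list is
         * too small and must be skipped; a 3584-byte one fits. Only if no block in
         * this list fits does the strategy move on to a strictly larger list,
         * where any block is guaranteed to suffice.
         */
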
        pool_resource::Block*
            HybridFit::selectBlock(uint64_t listOccupancy,
                                   const std::vector<pool_resource::Block*>& freeLists,
                                   size_t blockSize)
        {
            // fast path: constant-time fit, which never inspects individual blocks
            pool_resource::Block* block =
                ConstantTimeFit::selectBlock(listOccupancy, freeLists, blockSize);
            if (block) {
                return block;
            }

            // slow path: first-fit scan of the floor-based list, which the
            // constant-time strategy skips
            size_t logBlockSize = detail::log2Floor(blockSize);
            size_t freeListIndex = logBlockSize - pool_resource::MIN_BLOCK_SIZE_LOG;
            for (block = freeLists[freeListIndex]; block; block = block->_nextFree) {
                if (block->size() >= blockSize) {
                    return block;
                }
            }

            return nullptr;
        }

        template class PoolResource<ConstantTimeFit>;
        template class PoolResource<FirstFit>;
        template class PoolResource<HybridFit>;
    } // namespace pool_resource
} // namespace elsa::mr
