Botan 1.11.15
Botan::mlock_allocator Class Reference
#include <locking_allocator.h>
Public Member Functions

   void* allocate(size_t num_elems, size_t elem_size)
   bool deallocate(void* p, size_t num_elems, size_t elem_size)
   mlock_allocator(const mlock_allocator&)
   mlock_allocator& operator=(const mlock_allocator&)

Static Public Member Functions

   static mlock_allocator& instance()
Definition at line 17 of file locking_allocator.h.
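In normal use this allocator is not called directly: Botan::secure_allocator<T> (and therefore Botan::secure_vector<T>) obtains its storage from mlock_allocator::instance(). A minimal sketch of that indirect use, assuming a Botan 1.11 build where secure_vector is provided by the botan/secmem.h header:

   #include <botan/secmem.h>

   int main()
      {
      // secure_vector's allocator requests storage from the mlock pool
      // and zeroes it on deallocation before handing it back.
      Botan::secure_vector<Botan::byte> key(32);
      key[0] = 0x42;
      return 0;
      }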
void* Botan::mlock_allocator::allocate(size_t num_elems, size_t elem_size)
Definition at line 95 of file locking_allocator.cpp.
References BOTAN_ASSERT, Botan::clear_mem(), m_mutex, and n.
   {
   if(!m_pool)
      return nullptr;

   const size_t n = num_elems * elem_size;
   const size_t alignment = ALIGNMENT_MULTIPLE * elem_size;

   if(n / elem_size != num_elems)
      return nullptr; // overflow!

   if(n > m_poolsize || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
      return nullptr;

   std::lock_guard<std::mutex> lock(m_mutex);

   auto best_fit = m_freelist.end();

   for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
      {
      // If we have a perfect fit, use it immediately
      if(i->second == n && (i->first % alignment) == 0)
         {
         const size_t offset = i->first;
         m_freelist.erase(i);
         clear_mem(m_pool + offset, n);

         BOTAN_ASSERT((reinterpret_cast<size_t>(m_pool) + offset) % alignment == 0,
                      "Returning correctly aligned pointer");

         return m_pool + offset;
         }

      if((i->second >= (n + padding_for_alignment(i->first, alignment)) &&
         ((best_fit == m_freelist.end()) || (best_fit->second > i->second))))
         {
         best_fit = i;
         }
      }

   if(best_fit != m_freelist.end())
      {
      const size_t offset = best_fit->first;

      const size_t alignment_padding = padding_for_alignment(offset, alignment);

      best_fit->first += n + alignment_padding;
      best_fit->second -= n + alignment_padding;

      // Need to realign, split the block
      if(alignment_padding)
         {
         /*
         If we used the entire block except for a small piece at the start
         needed for alignment, just update the entry already in place (it is
         in the correct location) rather than deleting the now-empty range
         and inserting a new one in the same place.
         */
         if(best_fit->second == 0)
            {
            best_fit->first = offset;
            best_fit->second = alignment_padding;
            }
         else
            m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
         }

      clear_mem(m_pool + offset + alignment_padding, n);

      BOTAN_ASSERT((reinterpret_cast<size_t>(m_pool) + offset + alignment_padding) % alignment == 0,
                   "Returning correctly aligned pointer");

      return m_pool + offset + alignment_padding;
      }

   return nullptr;
   }
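The loop above is a best-fit scan over a freelist of (offset, length) pairs kept sorted by offset, adding padding whenever a block's start is not aligned for the request. The following self-contained sketch isolates just that selection step; Block and pick_block are illustrative names, and this padding_for_alignment is the obvious implementation implied by its use above, not Botan's source:

   #include <cstddef>
   #include <cstdint>
   #include <utility>
   #include <vector>

   // Simplified freelist model: (offset, length) pairs, sorted by offset.
   using Block = std::pair<std::size_t, std::size_t>;

   // Bytes needed to round offset up to the next multiple of alignment.
   std::size_t padding_for_alignment(std::size_t offset, std::size_t alignment)
      {
      const std::size_t mod = offset % alignment;
      return mod ? alignment - mod : 0;
      }

   // Index of the chosen block, or SIZE_MAX if nothing fits. An exactly
   // sized, already aligned block wins immediately; otherwise the smallest
   // block that can hold n bytes plus its alignment padding is kept.
   std::size_t pick_block(const std::vector<Block>& freelist,
                          std::size_t n, std::size_t alignment)
      {
      std::size_t best = SIZE_MAX;
      for(std::size_t i = 0; i != freelist.size(); ++i)
         {
         if(freelist[i].second == n && freelist[i].first % alignment == 0)
            return i; // perfect fit, use it immediately

         const std::size_t need = n + padding_for_alignment(freelist[i].first, alignment);
         if(freelist[i].second >= need &&
            (best == SIZE_MAX || freelist[best].second > freelist[i].second))
            best = i;
         }
      return best;
      }

A perfect fit short-circuits the scan because taking that block leaves no fragment behind; otherwise preferring the smallest usable block keeps larger blocks intact for later requests.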
bool Botan::mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size)
Definition at line 174 of file locking_allocator.cpp.
References BOTAN_ASSERT, m_mutex, n, x, and y.
   {
   if(!m_pool)
      return false;

   /*
   We do not have to zero the memory here, as secure_allocator::deallocate
   does that for all arguments before invoking the deallocator (us or delete[])
   */

   size_t n = num_elems * elem_size;

   /*
   We return nullptr in allocate if there was an overflow, so we should
   never ever see an overflow in a deallocation.
   */
   BOTAN_ASSERT(n / elem_size == num_elems, "No overflow in deallocation");

   if(!ptr_in_pool(m_pool, m_poolsize, p, n))
      return false;

   std::lock_guard<std::mutex> lock(m_mutex);

   const size_t start = static_cast<byte*>(p) - m_pool;

   auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y)
      { return x.first < y.first; };

   auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
                             std::make_pair(start, 0), comp);

   // try to merge with later block
   if(i != m_freelist.end() && start + n == i->first)
      {
      i->first = start;
      i->second += n;
      n = 0;
      }

   // try to merge with previous block
   if(i != m_freelist.begin())
      {
      auto prev = std::prev(i);

      if(prev->first + prev->second == start)
         {
         if(n)
            {
            prev->second += n;
            n = 0;
            }
         else
            {
            // merge adjoining
            prev->second += i->second;
            m_freelist.erase(i);
            }
         }
      }

   if(n != 0) // no merge possible?
      m_freelist.insert(i, std::make_pair(start, n));

   return true;
   }
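Both functions can also be exercised directly through the singleton. A minimal sketch, assuming the header is installed as botan/locking_allocator.h and that the process was able to mlock a pool at startup (otherwise allocate() simply returns nullptr):

   #include <botan/locking_allocator.h>
   #include <cstring>

   int main()
      {
      Botan::mlock_allocator& pool = Botan::mlock_allocator::instance();

      // 32 elements of 1 byte each; nullptr means no pool, pool full, or
      // the request exceeded BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION.
      if(void* p = pool.allocate(32, 1))
         {
         std::memset(p, 0xAB, 32);  // use the locked memory
         std::memset(p, 0, 32);     // zero it ourselves; deallocate() does not
         pool.deallocate(p, 32, 1); // false would mean p was not pool memory
         }
      return 0;
      }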
mlock_allocator& Botan::mlock_allocator::instance() [static]
Definition at line 296 of file locking_allocator.cpp.
Referenced by Botan::secure_allocator< T >::allocate(), and Botan::secure_allocator< T >::deallocate().
   {
   static mlock_allocator mlock;
   return mlock;
   }
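As noted above, secure_allocator<T>::allocate() and deallocate() reach the pool through this accessor and fall back to the regular heap when the pool cannot satisfy a request. A sketch of that pattern for trivially-constructible element types such as Botan::byte; the function names are illustrative and this is not secure_allocator's actual source:

   #include <botan/locking_allocator.h>
   #include <cstddef>

   template<typename T>
   T* locked_or_heap_allocate(std::size_t n)
      {
      // Try the mlock'd pool first; nullptr means it is absent or full.
      if(void* p = Botan::mlock_allocator::instance().allocate(n, sizeof(T)))
         return static_cast<T*>(p);
      return new T[n]; // heap fallback
      }

   template<typename T>
   void locked_or_heap_deallocate(T* p, std::size_t n)
      {
      // deallocate() reports whether p belonged to the pool; if not, it
      // must have come from the heap fallback above.
      if(!Botan::mlock_allocator::instance().deallocate(p, n, sizeof(T)))
         delete[] p;
      }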
mlock_allocator& Botan::mlock_allocator::operator=(const mlock_allocator&)