Botan  1.11.15
src/lib/alloc/locking_allocator/locking_allocator.cpp
Go to the documentation of this file.
00001 /*
00002 * Mlock Allocator
00003 * (C) 2012,2014 Jack Lloyd
00004 *
00005 * Botan is released under the Simplified BSD License (see license.txt)
00006 */
00007 
#include <botan/locking_allocator.h>
#include <botan/mem_ops.h>

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <exception>
#include <stdexcept>
#include <string>

#include <sys/mman.h>
#include <sys/resource.h>
00015 
00016 namespace Botan {
00017 
00018 namespace {
00019 
00020 /**
00021 * Requests for objects of sizeof(T) will be aligned at
00022 * sizeof(T)*ALIGNMENT_MULTIPLE bytes.
00023 */
00024 const size_t ALIGNMENT_MULTIPLE = 2;
00025 
00026 size_t reset_mlock_limit(size_t max_req)
00027    {
00028    struct rlimit limits;
00029    ::getrlimit(RLIMIT_MEMLOCK, &limits);
00030 
00031    if(limits.rlim_cur < limits.rlim_max)
00032       {
00033       limits.rlim_cur = limits.rlim_max;
00034       ::setrlimit(RLIMIT_MEMLOCK, &limits);
00035       ::getrlimit(RLIMIT_MEMLOCK, &limits);
00036       }
00037 
00038    return std::min<size_t>(limits.rlim_cur, max_req);
00039    }
00040 
00041 size_t mlock_limit()
00042    {
00043    /*
00044    * Linux defaults to only 64 KiB of mlockable memory per process
00045    * (too small) but BSDs offer a small fraction of total RAM (more
00046    * than we need). Bound the total mlock size to 512 KiB which is
00047    * enough to run the entire test suite without spilling to non-mlock
00048    * memory (and thus presumably also enough for many useful
00049    * programs), but small enough that we should not cause problems
00050    * even if many processes are mlocking on the same machine.
00051    */
00052    size_t mlock_requested = 512;
00053 
00054    /*
00055    * Allow override via env variable
00056    */
00057    if(const char* env = ::getenv("BOTAN_MLOCK_POOL_SIZE"))
00058       {
00059       try
00060          {
00061          const size_t user_req = std::stoul(env, nullptr);
00062          mlock_requested = std::min(user_req, mlock_requested);
00063          }
00064       catch(std::exception&) { /* ignore it */ }
00065       }
00066 
00067    return reset_mlock_limit(mlock_requested*1024);
00068    }
00069 
00070 bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
00071                  const void* buf_ptr, size_t bufsize)
00072    {
00073    const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
00074    const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
00075 
00076    if(buf < pool || buf >= pool + poolsize)
00077       return false;
00078 
00079    BOTAN_ASSERT(buf + bufsize <= pool + poolsize,
00080                 "Pointer does not partially overlap pool");
00081 
00082    return true;
00083    }
00084 
00085 size_t padding_for_alignment(size_t offset, size_t desired_alignment)
00086    {
00087    size_t mod = offset % desired_alignment;
00088    if(mod == 0)
00089       return 0; // already right on
00090    return desired_alignment - mod;
00091    }
00092 
00093 }
00094 
00095 void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
00096    {
00097    if(!m_pool)
00098       return nullptr;
00099 
00100    const size_t n = num_elems * elem_size;
00101    const size_t alignment = ALIGNMENT_MULTIPLE * elem_size;
00102 
00103    if(n / elem_size != num_elems)
00104       return nullptr; // overflow!
00105 
00106    if(n > m_poolsize || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
00107       return nullptr;
00108 
00109    std::lock_guard<std::mutex> lock(m_mutex);
00110 
00111    auto best_fit = m_freelist.end();
00112 
00113    for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
00114       {
00115       // If we have a perfect fit, use it immediately
00116       if(i->second == n && (i->first % alignment) == 0)
00117          {
00118          const size_t offset = i->first;
00119          m_freelist.erase(i);
00120          clear_mem(m_pool + offset, n);
00121 
00122          BOTAN_ASSERT((reinterpret_cast<size_t>(m_pool) + offset) % alignment == 0,
00123                       "Returning correctly aligned pointer");
00124 
00125          return m_pool + offset;
00126          }
00127 
00128       if((i->second >= (n + padding_for_alignment(i->first, alignment)) &&
00129           ((best_fit == m_freelist.end()) || (best_fit->second > i->second))))
00130          {
00131          best_fit = i;
00132          }
00133       }
00134 
00135    if(best_fit != m_freelist.end())
00136       {
00137       const size_t offset = best_fit->first;
00138 
00139       const size_t alignment_padding = padding_for_alignment(offset, alignment);
00140 
00141       best_fit->first += n + alignment_padding;
00142       best_fit->second -= n + alignment_padding;
00143 
00144       // Need to realign, split the block
00145       if(alignment_padding)
00146          {
00147          /*
00148          If we used the entire block except for small piece used for
00149          alignment at the beginning, so just update the entry already
00150          in place (as it is in the correct location), rather than
00151          deleting the empty range and inserting the new one in the
00152          same location.
00153          */
00154          if(best_fit->second == 0)
00155             {
00156             best_fit->first = offset;
00157             best_fit->second = alignment_padding;
00158             }
00159          else
00160             m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
00161          }
00162 
00163       clear_mem(m_pool + offset + alignment_padding, n);
00164 
00165       BOTAN_ASSERT((reinterpret_cast<size_t>(m_pool) + offset + alignment_padding) % alignment == 0,
00166                    "Returning correctly aligned pointer");
00167 
00168       return m_pool + offset + alignment_padding;
00169       }
00170 
00171    return nullptr;
00172    }
00173 
00174 bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size)
00175    {
00176    if(!m_pool)
00177       return false;
00178 
00179    /*
00180    We do not have to zero the memory here, as
00181    secure_allocator::deallocate does that for all arguments before
00182    invoking the deallocator (us or delete[])
00183    */
00184 
00185    size_t n = num_elems * elem_size;
00186 
00187    /*
00188    We return nullptr in allocate if there was an overflow, so we
00189    should never ever see an overflow in a deallocation.
00190    */
00191    BOTAN_ASSERT(n / elem_size == num_elems,
00192                 "No overflow in deallocation");
00193 
00194    if(!ptr_in_pool(m_pool, m_poolsize, p, n))
00195       return false;
00196 
00197    std::lock_guard<std::mutex> lock(m_mutex);
00198 
00199    const size_t start = static_cast<byte*>(p) - m_pool;
00200 
00201    auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };
00202 
00203    auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
00204                              std::make_pair(start, 0), comp);
00205 
00206    // try to merge with later block
00207    if(i != m_freelist.end() && start + n == i->first)
00208       {
00209       i->first = start;
00210       i->second += n;
00211       n = 0;
00212       }
00213 
00214    // try to merge with previous block
00215    if(i != m_freelist.begin())
00216       {
00217       auto prev = std::prev(i);
00218 
00219       if(prev->first + prev->second == start)
00220          {
00221          if(n)
00222             {
00223             prev->second += n;
00224             n = 0;
00225             }
00226          else
00227             {
00228             // merge adjoining
00229             prev->second += i->second;
00230             m_freelist.erase(i);
00231             }
00232          }
00233       }
00234 
00235    if(n != 0) // no merge possible?
00236       m_freelist.insert(i, std::make_pair(start, n));
00237 
00238    return true;
00239    }
00240 
00241 mlock_allocator::mlock_allocator() :
00242    m_poolsize(mlock_limit()),
00243    m_pool(nullptr)
00244    {
00245 #if !defined(MAP_NOCORE)
00246    #define MAP_NOCORE 0
00247 #endif
00248 
00249 #if !defined(MAP_ANONYMOUS)
00250    #define MAP_ANONYMOUS MAP_ANON
00251 #endif
00252 
00253    if(m_poolsize)
00254       {
00255       m_pool = static_cast<byte*>(
00256          ::mmap(
00257             nullptr, m_poolsize,
00258             PROT_READ | PROT_WRITE,
00259             MAP_ANONYMOUS | MAP_SHARED | MAP_NOCORE,
00260             -1, 0));
00261 
00262       if(m_pool == static_cast<byte*>(MAP_FAILED))
00263          {
00264          m_pool = nullptr;
00265          throw std::runtime_error("Failed to mmap locking_allocator pool");
00266          }
00267 
00268       clear_mem(m_pool, m_poolsize);
00269 
00270       if(::mlock(m_pool, m_poolsize) != 0)
00271          {
00272          ::munmap(m_pool, m_poolsize);
00273          m_pool = nullptr;
00274          throw std::runtime_error("Could not mlock " + std::to_string(m_poolsize) + " bytes");
00275          }
00276 
00277 #if defined(MADV_DONTDUMP)
00278       ::madvise(m_pool, m_poolsize, MADV_DONTDUMP);
00279 #endif
00280 
00281       m_freelist.push_back(std::make_pair(0, m_poolsize));
00282       }
00283    }
00284 
00285 mlock_allocator::~mlock_allocator()
00286    {
00287    if(m_pool)
00288       {
00289       clear_mem(m_pool, m_poolsize);
00290       ::munlock(m_pool, m_poolsize);
00291       ::munmap(m_pool, m_poolsize);
00292       m_pool = nullptr;
00293       }
00294    }
00295 
00296 mlock_allocator& mlock_allocator::instance()
00297    {
00298    static mlock_allocator mlock;
00299    return mlock;
00300    }
00301 
00302 }