diff options
author | Josh Blum | 2013-04-30 20:54:06 -0700 |
---|---|---|
committer | Josh Blum | 2013-04-30 20:54:06 -0700 |
commit | a373e34808d55d1775fd84a0e5e013455fed67e7 (patch) | |
tree | 5d53e8c4d998a76709033a8f360d697f53b25c09 /lib/theron_allocator.cpp | |
parent | e085418e9299af5590157d3abe61bd0a3c3ed93d (diff) | |
download | sandhi-a373e34808d55d1775fd84a0e5e013455fed67e7.tar.gz sandhi-a373e34808d55d1775fd84a0e5e013455fed67e7.tar.bz2 sandhi-a373e34808d55d1775fd84a0e5e013455fed67e7.zip |
benchmarks: removed keep_m_in_n block from many rates
This block is optimized for laziness and not performance since it
returns before finishing its input based on whatever M is.
This means that with m=1, n=1, this block produces 1000s of outputs per input buffer,
and it's not really what I am looking to benchmark...
Ironically, keep_m_in_n's crappy implementation makes for big wins on GRAS,
either due to lower scheduler overhead or easier locking contention.
However, its crazy number of tiny outputs really rails on the
mailbox queue of the next block in the chain and causes
the caching allocators to get very excited.
I guess that's all fine since the block is meant to be lazy...
But I can't measure the effectiveness of a typical allocator situation.
Signing off...
Diffstat (limited to 'lib/theron_allocator.cpp')
-rw-r--r-- | lib/theron_allocator.cpp | 100 |
1 files changed, 0 insertions, 100 deletions
diff --git a/lib/theron_allocator.cpp b/lib/theron_allocator.cpp deleted file mode 100644 index 9db9367..0000000 --- a/lib/theron_allocator.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) by Josh Blum. See LICENSE.txt for licensing information. - -/*********************************************************************** - * There is allocation overhead for sending messages. - * Want per worker-allocator for message allocation... - * But until thats possible, install a new global allocator. - * This allocator uses a fixed pool for small sized buffers, - * and otherwise the regular malloc/free for larger buffers. - **********************************************************************/ - -#include <gras/gras.hpp> -#include <gras_impl/debug.hpp> -#include <Theron/Detail/Threading/SpinLock.h> -#include <Theron/IAllocator.h> -#include <Theron/AllocatorManager.h> -#include <boost/circular_buffer.hpp> - -#define MY_ALLOCATOR_CHUNK_SIZE 256 -#define MY_ALLOCATOR_POOL_SIZE (MY_ALLOCATOR_CHUNK_SIZE * (1 << 18)) - -static unsigned long long unwanted_malloc_count = 0; - -static struct ExitPrinter -{ - ExitPrinter(void){} - ~ExitPrinter(void) - { - if (unwanted_malloc_count) - { - VAR(unwanted_malloc_count); - } - } -} exit_printer; - -static struct WorkerAllocator : Theron::IAllocator -{ - WorkerAllocator(void) - { - const size_t N = MY_ALLOCATOR_POOL_SIZE/MY_ALLOCATOR_CHUNK_SIZE; - queue.set_capacity(N); - for (size_t i = 0; i < N; i++) - { - const ptrdiff_t pool_ptr = ptrdiff_t(pool) + i*MY_ALLOCATOR_CHUNK_SIZE; - queue.push_back((void *)pool_ptr); - } - pool_end = ptrdiff_t(pool) + MY_ALLOCATOR_POOL_SIZE; - Theron::AllocatorManager::Instance().SetAllocator(this); - } - - ~WorkerAllocator(void) - { - //NOP - } - - void *Allocate(const SizeType size) - { - if GRAS_LIKELY(size <= MY_ALLOCATOR_CHUNK_SIZE) - { - mSpinLock.Lock(); - if GRAS_UNLIKELY(queue.empty()) - { - unwanted_malloc_count++; - mSpinLock.Unlock(); - return std::malloc(size); - } - void *memory = 
queue.front(); - queue.pop_front(); - mSpinLock.Unlock(); - return memory; - } - else - { - //std::cout << "malloc size " << size << std::endl; - return std::malloc(size); - } - } - - void Free(void *const memory) - { - const bool in_pool = ptrdiff_t(memory) >= ptrdiff_t(pool) and ptrdiff_t(memory) < pool_end; - if GRAS_LIKELY(in_pool) - { - mSpinLock.Lock(); - queue.push_front(memory); - mSpinLock.Unlock(); - } - else - { - std::free(memory); - } - } - - boost::circular_buffer<void *> queue; - THERON_PREALIGN(GRAS_MAX_ALIGNMENT) - char pool[MY_ALLOCATOR_POOL_SIZE] - THERON_POSTALIGN(GRAS_MAX_ALIGNMENT); - ptrdiff_t pool_end; - Theron::Detail::SpinLock mSpinLock; - -} my_alloc; |