Integrate whole chunks into unused dirty page purging machinery.

Extend per arena unused dirty page purging to manage unused dirty chunks
in addition to unused dirty runs.  Rather than immediately unmapping
deallocated chunks (or purging them in the --disable-munmap case), store
them in a separate set of trees, chunks_[sz]ad_dirty.  Preferentially
allocate dirty chunks.  When excessive unused dirty pages accumulate,
purge runs and chunks in integrated LRU order (and unmap chunks in the
--enable-munmap case).

Refactor extent_node_t to provide accessor functions.
Jason Evans
2015-02-15 18:04:46 -08:00
parent 40ab8f98e4
commit ee41ad409a
12 changed files with 631 additions and 248 deletions
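
As a rough sketch of the mechanism the commit message describes: deallocated
chunks are cached as dirty and reused preferentially, and the least recently
dirtied ones are purged once too many accumulate.  The code below collapses
the real design (size/address-ordered trees chunks_[sz]ad_dirty, integrated
with dirty runs) into a single-size LRU list; every name in it is
illustrative, not jemalloc's.

/*
 * Illustrative sketch only, not jemalloc code: cache deallocated chunks as
 * dirty, reuse them preferentially, and purge the least recently dirtied
 * ones once a threshold is exceeded.
 */
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/queue.h>

#define CHUNK_SIZE	((size_t)2 << 20)	/* 2 MiB, for illustration. */
#define NDIRTY_MAX	8			/* Purge beyond 8 cached chunks. */

struct dirty_chunk {
	void				*addr;
	TAILQ_ENTRY(dirty_chunk)	link;
};
TAILQ_HEAD(dirty_head, dirty_chunk);
static struct dirty_head dirty_lru = TAILQ_HEAD_INITIALIZER(dirty_lru);
static unsigned ndirty;

/* Preferentially reuse a cached dirty chunk; otherwise map a fresh one. */
static void *
chunk_alloc_sketch(void)
{
	struct dirty_chunk *c = TAILQ_LAST(&dirty_lru, dirty_head);

	if (c != NULL) {
		void *addr = c->addr;
		TAILQ_REMOVE(&dirty_lru, c, link);
		free(c);
		ndirty--;
		return (addr);
	}
	return (mmap(NULL, CHUNK_SIZE, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0));	/* Error handling omitted. */
}

/* Cache the chunk instead of unmapping it immediately; purge LRU excess. */
static void
chunk_dalloc_sketch(void *addr)
{
	struct dirty_chunk *c = malloc(sizeof(*c));

	c->addr = addr;
	TAILQ_INSERT_TAIL(&dirty_lru, c, link);	/* Most recently dirtied. */
	ndirty++;
	while (ndirty > NDIRTY_MAX) {
		struct dirty_chunk *old = TAILQ_FIRST(&dirty_lru);

		TAILQ_REMOVE(&dirty_lru, old, link);
		munmap(old->addr, CHUNK_SIZE);	/* Or purge via madvise(). */
		free(old);
		ndirty--;
	}
}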


@@ -368,8 +368,13 @@ typedef unsigned index_t;
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/chunk.h"
@@ -933,7 +938,8 @@ ivsalloc(const void *ptr, bool demote)
 	if (node == NULL)
 		return (0);
 	/* Only arena chunks should be looked up via interior pointers. */
-	assert(node->addr == ptr || node->size == 0);
+	assert(extent_node_addr_get(node) == ptr ||
+	    extent_node_achunk_get(node));
 
 	return (isalloc(ptr, demote));
 }
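
The new assertion uses the accessor functions this commit introduces instead
of reaching into extent_node_t directly, and an explicit arena-chunk predicate
replaces the old size == 0 convention.  A minimal sketch of what such
accessors might look like; the layout and field names below are assumptions,
not copied from extent.h:

/* Sketch only: assumed extent_node_t layout and accessor style. */
#include <stdbool.h>
#include <stddef.h>

typedef struct extent_node_s extent_node_t;
struct extent_node_s {
	void	*addr;		/* Base address of the extent. */
	size_t	size;		/* Extent size. */
	bool	achunk;		/* True if the node tracks an arena chunk. */
};

static inline void *
extent_node_addr_get(const extent_node_t *node)
{
	return (node->addr);
}

static inline bool
extent_node_achunk_get(const extent_node_t *node)
{
	return (node->achunk);
}

static inline void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
	node->achunk = achunk;
}

With accessors in place, callers such as ivsalloc() no longer depend on the
node's field layout, so later changes to extent_node_t (packing, renaming, or
moving fields) stay local to extent.h.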