Update hash from MurmurHash2 to MurmurHash3.
Update hash from MurmurHash2 to MurmurHash3, primarily because the latter generates 128 bits in a single call for no extra cost, which simplifies integration with cuckoo hashing.
parent 7329a4f038
commit ae03bf6a57
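The API change at the core of this commit is visible in the ckh_hash_t typedef below: instead of asking the hash callback for two table-sized results through minbits/hash1/hash2 output parameters, callers now pass a two-element array that receives a full 128-bit hash in one call, and each cuckoo bucket is derived by masking one half. The following sketch illustrates that calling convention only; toy_string_hash and main are hypothetical stand-ins (the mixing is not MurmurHash3), while the typedef shape and the bucket-masking expression mirror the diff.

#include <stdio.h>
#include <stddef.h>

/* New-style callback shape from the diff: fills r_hash[0..1] in a single call. */
typedef void ckh_hash_t(const void *, size_t r_hash[2]);

/* Hypothetical stand-in for jemalloc's MurmurHash3-based hash(); illustration only. */
static void
toy_string_hash(const void *key, size_t r_hash[2])
{
    const unsigned char *s = (const unsigned char *)key;
    size_t h = 2166136261u;        /* simple FNV-style mixing, not MurmurHash3 */

    while (*s != '\0')
        h = (h ^ *s++) * 16777619u;
    r_hash[0] = h;
    r_hash[1] = h ^ (h >> 13);
}

int
main(void)
{
    ckh_hash_t *hash_fn = toy_string_hash;
    unsigned lg_curbuckets = 10;   /* table has 2^10 buckets */
    size_t hashes[2], bucket0, bucket1;

    /* One call yields both candidate buckets, as in ckh_isearch(). */
    hash_fn("example key", hashes);
    bucket0 = hashes[0] & (((size_t)1 << lg_curbuckets) - 1);
    bucket1 = hashes[1] & (((size_t)1 << lg_curbuckets) - 1);
    printf("primary %zu, secondary %zu\n", bucket0, bucket1);
    return 0;
}

With the old interface the second probe either reused the top half of a 64-bit hash or paid for a second hash call; with the MurmurHash3-style interface both probes come from the same 128-bit result.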
@@ -5,7 +5,7 @@ typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
@@ -75,11 +75,9 @@ bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
void ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);

#endif /* JEMALLOC_H_EXTERNS */

@@ -1,3 +1,8 @@
/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
 * details.
 */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

@@ -14,55 +19,311 @@
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint64_t hash(const void *key, size_t len, uint64_t seed);
void hash(const void *key, size_t len, const uint32_t seed,
    size_t r_hash[2]);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/*
 * The following hash function is based on MurmurHash64A(), placed into the
 * public domain by Austin Appleby. See http://murmurhash.googlepages.com/ for
 * details.
 */
JEMALLOC_INLINE uint64_t
hash(const void *key, size_t len, uint64_t seed)
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
    const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
    const int r = 47;
    uint64_t h = seed ^ (len * m);
    const uint64_t *data = (const uint64_t *)key;
    const uint64_t *end = data + (len/8);
    const unsigned char *data2;

    assert(((uintptr_t)key & 0x7) == 0);
    return (x << r) | (x >> (32 - r));
}

    while(data != end) {
        uint64_t k = *data++;
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
    return (x << r) | (x >> (64 - r));
}

        k *= m;
        k ^= k >> r;
        k *= m;
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{

        h ^= k;
        h *= m;
    return p[i];
}

JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{

    return p[i];
}

JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{

    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;

    return h;
}

JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{

    k ^= k >> 33;
    k *= QU(0xff51afd7ed558ccdLLU);
    k ^= k >> 33;
    k *= QU(0xc4ceb9fe1a85ec53LLU);
    k ^= k >> 33;

    return k;
}

JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
    const uint8_t *data = (const uint8_t *) key;
    const int nblocks = len / 4;

    uint32_t h1 = seed;

    const uint32_t c1 = 0xcc9e2d51;
    const uint32_t c2 = 0x1b873593;

    /* body */
    {
        const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
        int i;

        for (i = -nblocks; i; i++) {
            uint32_t k1 = hash_get_block_32(blocks, i);

            k1 *= c1;
            k1 = hash_rotl_32(k1, 15);
            k1 *= c2;

            h1 ^= k1;
            h1 = hash_rotl_32(h1, 13);
            h1 = h1*5 + 0xe6546b64;
        }
    }

    data2 = (const unsigned char *)data;
    switch(len & 7) {
    case 7: h ^= ((uint64_t)(data2[6])) << 48;
    case 6: h ^= ((uint64_t)(data2[5])) << 40;
    case 5: h ^= ((uint64_t)(data2[4])) << 32;
    case 4: h ^= ((uint64_t)(data2[3])) << 24;
    case 3: h ^= ((uint64_t)(data2[2])) << 16;
    case 2: h ^= ((uint64_t)(data2[1])) << 8;
    case 1: h ^= ((uint64_t)(data2[0]));
        h *= m;
    /* tail */
    {
        const uint8_t *tail = (const uint8_t *) (data + nblocks*4);

        uint32_t k1 = 0;

        switch (len & 3) {
        case 3: k1 ^= tail[2] << 16;
        case 2: k1 ^= tail[1] << 8;
        case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
            k1 *= c2; h1 ^= k1;
        }
    }

    h ^= h >> r;
    h *= m;
    h ^= h >> r;
    /* finalization */
    h1 ^= len;

    return (h);
    h1 = hash_fmix_32(h1);

    return h1;
}

UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2])
{
    const uint8_t * data = (const uint8_t *) key;
    const int nblocks = len / 16;

    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    /* body */
    {
        const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
        int i;

        for (i = -nblocks; i; i++) {
            uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
            uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
            uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
            uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

            k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

            h1 = hash_rotl_32(h1, 19); h1 += h2;
            h1 = h1*5 + 0x561ccd1b;

            k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

            h2 = hash_rotl_32(h2, 17); h2 += h3;
            h2 = h2*5 + 0x0bcaa747;

            k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

            h3 = hash_rotl_32(h3, 15); h3 += h4;
            h3 = h3*5 + 0x96cd1c35;

            k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

            h4 = hash_rotl_32(h4, 13); h4 += h1;
            h4 = h4*5 + 0x32ac3b17;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
        uint32_t k1 = 0;
        uint32_t k2 = 0;
        uint32_t k3 = 0;
        uint32_t k4 = 0;

        switch (len & 15) {
        case 15: k4 ^= tail[14] << 16;
        case 14: k4 ^= tail[13] << 8;
        case 13: k4 ^= tail[12] << 0;
            k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

        case 12: k3 ^= tail[11] << 24;
        case 11: k3 ^= tail[10] << 16;
        case 10: k3 ^= tail[ 9] << 8;
        case 9: k3 ^= tail[ 8] << 0;
            k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

        case 8: k2 ^= tail[ 7] << 24;
        case 7: k2 ^= tail[ 6] << 16;
        case 6: k2 ^= tail[ 5] << 8;
        case 5: k2 ^= tail[ 4] << 0;
            k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

        case 4: k1 ^= tail[ 3] << 24;
        case 3: k1 ^= tail[ 2] << 16;
        case 2: k1 ^= tail[ 1] << 8;
        case 1: k1 ^= tail[ 0] << 0;
            k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    h1 = hash_fmix_32(h1);
    h2 = hash_fmix_32(h2);
    h3 = hash_fmix_32(h3);
    h4 = hash_fmix_32(h4);

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    r_out[0] = (((uint64_t) h2) << 32) | h1;
    r_out[1] = (((uint64_t) h4) << 32) | h3;
}

UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2])
{
    const uint8_t *data = (const uint8_t *) key;
    const int nblocks = len / 16;

    uint64_t h1 = seed;
    uint64_t h2 = seed;

    const uint64_t c1 = QU(0x87c37b91114253d5LLU);
    const uint64_t c2 = QU(0x4cf5ad432745937fLLU);

    /* body */
    {
        const uint64_t *blocks = (const uint64_t *) (data);
        int i;

        for (i = 0; i < nblocks; i++) {
            uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
            uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

            k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

            h1 = hash_rotl_64(h1, 27); h1 += h2;
            h1 = h1*5 + 0x52dce729;

            k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

            h2 = hash_rotl_64(h2, 31); h2 += h1;
            h2 = h2*5 + 0x38495ab5;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
        uint64_t k1 = 0;
        uint64_t k2 = 0;

        switch (len & 15) {
        case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
        case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
        case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
        case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
        case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
        case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
        case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
            k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

        case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
        case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
        case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
        case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
        case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
        case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
        case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
        case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
            k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len; h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = hash_fmix_64(h1);
    h2 = hash_fmix_64(h2);

    h1 += h2;
    h2 += h1;

    r_out[0] = h1;
    r_out[1] = h2;
}

/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3)
    hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
    uint64_t hashes[2];
    hash_x86_128(key, len, seed, hashes);
    r_hash[0] = (size_t)hashes[0];
    r_hash[1] = (size_t)hashes[1];
#endif
}
#endif

@@ -226,6 +226,7 @@ static const bool config_ivsalloc =
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)
#define QU(q) ((uint64_t)q)

#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))

@@ -65,6 +65,7 @@
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
@@ -176,6 +177,15 @@
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
@@ -293,12 +303,14 @@
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_get JEMALLOC_N(rtree_get)
@@ -342,6 +354,7 @@
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
@@ -357,6 +370,7 @@
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
@@ -365,5 +379,6 @@
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define u2rz JEMALLOC_N(u2rz)

src/ckh.c (81 lines changed)
@@ -70,20 +70,20 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
JEMALLOC_INLINE size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
    size_t hash1, hash2, bucket, cell;
    size_t hashes[2], bucket, cell;

    assert(ckh != NULL);

    ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
    ckh->hash(key, hashes);

    /* Search primary bucket. */
    bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    if (cell != SIZE_T_MAX)
        return (cell);

    /* Search secondary bucket. */
    bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    return (cell);
}
@@ -126,7 +126,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
{
    const void *key, *data, *tkey, *tdata;
    ckhc_t *cell;
    size_t hash1, hash2, bucket, tbucket;
    size_t hashes[2], bucket, tbucket;
    unsigned i;

    bucket = argbucket;
@@ -155,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif

        /* Find the alternate bucket for the evicted item. */
        ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
        tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
        ckh->hash(key, hashes);
        tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
        if (tbucket == bucket) {
            tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
            tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
                - 1);
            /*
             * It may be that (tbucket == bucket) still, if the
             * item's hashes both indicate this bucket. However,
@@ -192,19 +193,19 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
JEMALLOC_INLINE bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
    size_t hash1, hash2, bucket;
    size_t hashes[2], bucket;
    const void *key = *argkey;
    const void *data = *argdata;

    ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
    ckh->hash(key, hashes);

    /* Try to insert in primary bucket. */
    bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
        return (false);

    /* Try to insert in secondary bucket. */
    bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
        return (false);

@@ -526,31 +527,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}

void
ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
ckh_string_hash(const void *key, size_t r_hash[2])
{
    size_t ret1, ret2;
    uint64_t h;

    assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
    assert(hash1 != NULL);
    assert(hash2 != NULL);

    h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
         * enough bits.
         */
        ret1 = h & ZU(0xffffffffU);
        ret2 = h >> 32;
    } else {
        ret1 = h;
        ret2 = hash(key, strlen((const char *)key),
            UINT64_C(0x8432a476666bbc13));
    }

    *hash1 = ret1;
    *hash2 = ret2;
    hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}

bool
@@ -564,41 +544,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}

void
ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2)
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
    size_t ret1, ret2;
    uint64_t h;
    union {
        const void *v;
        uint64_t i;
        size_t i;
    } u;

    assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
    assert(hash1 != NULL);
    assert(hash2 != NULL);

    assert(sizeof(u.v) == sizeof(u.i));
#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
    u.i = 0;
#endif
    u.v = key;
    h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
         * enough bits.
         */
        ret1 = h & ZU(0xffffffffU);
        ret2 = h >> 32;
    } else {
        assert(SIZEOF_PTR == 8);
        ret1 = h;
        ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
    }

    *hash1 = ret1;
    *hash2 = ret2;
    hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}

bool

src/prof.c (28 lines changed)
@@ -90,8 +90,7 @@ static bool prof_dump(bool propagate_err, const char *filename,
    bool leakcheck);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
static void prof_bt_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_keycomp(const void *k1, const void *k2);
static malloc_mutex_t *prof_ctx_mutex_choose(void);

@@ -1043,34 +1042,13 @@ prof_gdump(void)
}

static void
prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
prof_bt_hash(const void *key, size_t r_hash[2])
{
    size_t ret1, ret2;
    uint64_t h;
    prof_bt_t *bt = (prof_bt_t *)key;

    cassert(config_prof);
    assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
    assert(hash1 != NULL);
    assert(hash2 != NULL);

    h = hash(bt->vec, bt->len * sizeof(void *),
        UINT64_C(0x94122f335b332aea));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
         * enough bits.
         */
        ret1 = h & ZU(0xffffffffU);
        ret2 = h >> 32;
    } else {
        ret1 = h;
        ret2 = hash(bt->vec, bt->len * sizeof(void *),
            UINT64_C(0x8432a476666bbc13));
    }

    *hash1 = ret1;
    *hash2 = ret2;
    hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool