diff --git a/Makefile.am b/Makefile.am index 70be448bbc..ba39e600f4 100644 --- a/Makefile.am +++ b/Makefile.am @@ -10,6 +10,8 @@ timedrun_SOURCES = timedrun.c memcached_SOURCES = memcached.c memcached.h \ hash.c hash.h \ + jenkins_hash.c jenkins_hash.h \ + murmur3_hash.c murmur3_hash.h \ slabs.c slabs.h \ items.c items.h \ assoc.c assoc.h \ diff --git a/assoc.c b/assoc.c index 0fc5d18696..5558be1e9f 100644 --- a/assoc.c +++ b/assoc.c @@ -218,7 +218,7 @@ static void *assoc_maintenance_thread(void *arg) { for (it = old_hashtable[expand_bucket]; NULL != it; it = next) { next = it->h_next; - bucket = hash(ITEM_key(it), it->nkey, 0) & hashmask(hashpower); + bucket = hash(ITEM_key(it), it->nkey) & hashmask(hashpower); it->h_next = primary_hashtable[bucket]; primary_hashtable[bucket] = it; } diff --git a/doc/protocol.txt b/doc/protocol.txt index 1d8f493b26..c107fe2c6e 100644 --- a/doc/protocol.txt +++ b/doc/protocol.txt @@ -559,6 +559,7 @@ other stats command. | hashpower_init | 32 | Starting size multiplier for hash table | | slab_reassign | bool | Whether slab page reassignment is allowed | | slab_automove | bool | Whether slab page automover is enabled | +| hash_algorithm | char | Hash table algorithm in use | |-------------------+----------+----------------------------------------------| diff --git a/hash.c b/hash.c index fcfc1ffcd3..a0c30369fa 100644 --- a/hash.c +++ b/hash.c @@ -1,431 +1,21 @@ /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -/* - * Hash table - * - * The hash function used here is by Bob Jenkins, 1996: - * - * "By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. - * You may use this code any way you wish, private, educational, - * or commercial. It's free." - * - */ -#include "memcached.h" - -/* - * Since the hash function does bit manipulation, it needs to know - * whether it's big or little-endian. ENDIAN_LITTLE and ENDIAN_BIG - * are set in the configure script. - */ -#if ENDIAN_BIG == 1 -# define HASH_LITTLE_ENDIAN 0 -# define HASH_BIG_ENDIAN 1 -#else -# if ENDIAN_LITTLE == 1 -# define HASH_LITTLE_ENDIAN 1 -# define HASH_BIG_ENDIAN 0 -# else -# define HASH_LITTLE_ENDIAN 0 -# define HASH_BIG_ENDIAN 0 -# endif -#endif - -#define rot(x,k) (((x)<<(k)) ^ ((x)>>(32-(k)))) - -/* -------------------------------------------------------------------------------- -mix -- mix 3 32-bit values reversibly. - -This is reversible, so any information in (a,b,c) before mix() is -still in (a,b,c) after mix(). - -If four pairs of (a,b,c) inputs are run through mix(), or through -mix() in reverse, there are at least 32 bits of the output that -are sometimes the same for one pair and different for another pair. -This was tested for: -* pairs that differed by one bit, by two bits, in any combination - of top bits of (a,b,c), or in any combination of bottom bits of - (a,b,c). -* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed - the output delta to a Gray code (a^(a>>1)) so a string of 1's (as - is commonly produced by subtraction) look like a single 1-bit - difference. -* the base values were pseudorandom, all zero but one bit set, or - all zero plus a counter that starts at zero. - -Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that -satisfy this are - 4 6 8 16 19 4 - 9 15 3 18 27 15 - 14 9 3 7 17 3 -Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing -for "differ" defined as + with a one-bit base and a two-bit delta. 
I -used http://burtleburtle.net/bob/hash/avalanche.html to choose -the operations, constants, and arrangements of the variables. - -This does not achieve avalanche. There are input bits of (a,b,c) -that fail to affect some output bits of (a,b,c), especially of a. The -most thoroughly mixed value is c, but it doesn't really even achieve -avalanche in c. - -This allows some parallelism. Read-after-writes are good at doubling -the number of bits affected, so the goal of mixing pulls in the opposite -direction as the goal of parallelism. I did what I could. Rotates -seem to cost as much as shifts on every machine I could lay my hands -on, and rotates are much kinder to the top and bottom bits, so I used -rotates. -------------------------------------------------------------------------------- -*/ -#define mix(a,b,c) \ -{ \ - a -= c; a ^= rot(c, 4); c += b; \ - b -= a; b ^= rot(a, 6); a += c; \ - c -= b; c ^= rot(b, 8); b += a; \ - a -= c; a ^= rot(c,16); c += b; \ - b -= a; b ^= rot(a,19); a += c; \ - c -= b; c ^= rot(b, 4); b += a; \ -} - -/* -------------------------------------------------------------------------------- -final -- final mixing of 3 32-bit values (a,b,c) into c - -Pairs of (a,b,c) values differing in only a few bits will usually -produce values of c that look totally different. This was tested for -* pairs that differed by one bit, by two bits, in any combination - of top bits of (a,b,c), or in any combination of bottom bits of - (a,b,c). -* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed - the output delta to a Gray code (a^(a>>1)) so a string of 1's (as - is commonly produced by subtraction) look like a single 1-bit - difference. -* the base values were pseudorandom, all zero but one bit set, or - all zero plus a counter that starts at zero. - -These constants passed: - 14 11 25 16 4 14 24 - 12 14 25 16 4 14 24 -and these came close: - 4 8 15 26 3 22 24 - 10 8 15 26 3 22 24 - 11 8 15 26 3 22 24 -------------------------------------------------------------------------------- -*/ -#define final(a,b,c) \ -{ \ - c ^= b; c -= rot(b,14); \ - a ^= c; a -= rot(c,11); \ - b ^= a; b -= rot(a,25); \ - c ^= b; c -= rot(b,16); \ - a ^= c; a -= rot(c,4); \ - b ^= a; b -= rot(a,14); \ - c ^= b; c -= rot(b,24); \ -} - -#if HASH_LITTLE_ENDIAN == 1 -uint32_t hash( - const void *key, /* the key to hash */ - size_t length, /* length of the key */ - const uint32_t initval) /* initval */ -{ - uint32_t a,b,c; /* internal state */ - union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ - - /* Set up the internal state */ - a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; - - u.ptr = key; - if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { - const uint32_t *k = key; /* read 32-bit chunks */ -#ifdef VALGRIND - const uint8_t *k8; -#endif /* ifdef VALGRIND */ - - /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ - while (length > 12) - { - a += k[0]; - b += k[1]; - c += k[2]; - mix(a,b,c); - length -= 12; - k += 3; - } - - /*----------------------------- handle the last (probably partial) block */ - /* - * "k[2]&0xffffff" actually reads beyond the end of the string, but - * then masks off the part it's not allowed to read. Because the - * string is aligned, the masked-off tail is in the same word as the - * rest of the string. Every machine with memory protection I've seen - * does it on word boundaries, so is OK with this. But VALGRIND will - * still catch it and complain. 
The masking trick does make the hash - * noticably faster for short strings (like English words). - */ -#ifndef VALGRIND - - switch(length) - { - case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; - case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; - case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; - case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=k[1]&0xffffff; a+=k[0]; break; - case 6 : b+=k[1]&0xffff; a+=k[0]; break; - case 5 : b+=k[1]&0xff; a+=k[0]; break; - case 4 : a+=k[0]; break; - case 3 : a+=k[0]&0xffffff; break; - case 2 : a+=k[0]&0xffff; break; - case 1 : a+=k[0]&0xff; break; - case 0 : return c; /* zero length strings require no mixing */ - } - -#else /* make valgrind happy */ - - k8 = (const uint8_t *)k; - switch(length) - { - case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ - case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ - case 9 : c+=k8[8]; /* fall through */ - case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ - case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ - case 5 : b+=k8[4]; /* fall through */ - case 4 : a+=k[0]; break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ - case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ - case 1 : a+=k8[0]; break; - case 0 : return c; /* zero length strings require no mixing */ - } - -#endif /* !valgrind */ - - } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { - const uint16_t *k = key; /* read 16-bit chunks */ - const uint8_t *k8; - - /*--------------- all but last block: aligned reads and different mixing */ - while (length > 12) - { - a += k[0] + (((uint32_t)k[1])<<16); - b += k[2] + (((uint32_t)k[3])<<16); - c += k[4] + (((uint32_t)k[5])<<16); - mix(a,b,c); - length -= 12; - k += 6; - } - - /*----------------------------- handle the last (probably partial) block */ - k8 = (const uint8_t *)k; - switch(length) - { - case 12: c+=k[4]+(((uint32_t)k[5])<<16); - b+=k[2]+(((uint32_t)k[3])<<16); - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 11: c+=((uint32_t)k8[10])<<16; /* @fallthrough */ - case 10: c+=k[4]; /* @fallthrough@ */ - b+=k[2]+(((uint32_t)k[3])<<16); - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 9 : c+=k8[8]; /* @fallthrough */ - case 8 : b+=k[2]+(((uint32_t)k[3])<<16); - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 7 : b+=((uint32_t)k8[6])<<16; /* @fallthrough */ - case 6 : b+=k[2]; - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 5 : b+=k8[4]; /* @fallthrough */ - case 4 : a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 3 : a+=((uint32_t)k8[2])<<16; /* @fallthrough */ - case 2 : a+=k[0]; - break; - case 1 : a+=k8[0]; - break; - case 0 : return c; /* zero length strings require no mixing */ - } - - } else { /* need to read the key one byte at a time */ - const uint8_t *k = key; - - /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ - while (length > 12) - { - a += k[0]; - a += ((uint32_t)k[1])<<8; - a += ((uint32_t)k[2])<<16; - a += ((uint32_t)k[3])<<24; - b += k[4]; - b += ((uint32_t)k[5])<<8; - b += ((uint32_t)k[6])<<16; - b += ((uint32_t)k[7])<<24; - c += k[8]; - c += ((uint32_t)k[9])<<8; - c += ((uint32_t)k[10])<<16; - c += ((uint32_t)k[11])<<24; - mix(a,b,c); - length -= 12; - k += 12; - } - - /*-------------------------------- last block: affect all 32 bits of (c) */ - switch(length) /* all the case statements fall through */ - { - case 12: c+=((uint32_t)k[11])<<24; - case 11: c+=((uint32_t)k[10])<<16; - case 10: 
c+=((uint32_t)k[9])<<8; - case 9 : c+=k[8]; - case 8 : b+=((uint32_t)k[7])<<24; - case 7 : b+=((uint32_t)k[6])<<16; - case 6 : b+=((uint32_t)k[5])<<8; - case 5 : b+=k[4]; - case 4 : a+=((uint32_t)k[3])<<24; - case 3 : a+=((uint32_t)k[2])<<16; - case 2 : a+=((uint32_t)k[1])<<8; - case 1 : a+=k[0]; - break; - case 0 : return c; /* zero length strings require no mixing */ - } - } - - final(a,b,c); - return c; /* zero length strings require no mixing */ -} - -#elif HASH_BIG_ENDIAN == 1 -/* - * hashbig(): - * This is the same as hashword() on big-endian machines. It is different - * from hashlittle() on all machines. hashbig() takes advantage of - * big-endian byte ordering. - */ -uint32_t hash( const void *key, size_t length, const uint32_t initval) -{ - uint32_t a,b,c; - union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ - - /* Set up the internal state */ - a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; - - u.ptr = key; - if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { - const uint32_t *k = key; /* read 32-bit chunks */ -#ifdef VALGRIND - const uint8_t *k8; -#endif /* ifdef VALGRIND */ - - /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ - while (length > 12) - { - a += k[0]; - b += k[1]; - c += k[2]; - mix(a,b,c); - length -= 12; - k += 3; - } - - /*----------------------------- handle the last (probably partial) block */ - /* - * "k[2]<<8" actually reads beyond the end of the string, but - * then shifts out the part it's not allowed to read. Because the - * string is aligned, the illegal read is in the same word as the - * rest of the string. Every machine with memory protection I've seen - * does it on word boundaries, so is OK with this. But VALGRIND will - * still catch it and complain. The masking trick does make the hash - * noticably faster for short strings (like English words). 
- */ -#ifndef VALGRIND - - switch(length) - { - case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; - case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; - case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; - case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; - case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; - case 5 : b+=k[1]&0xff000000; a+=k[0]; break; - case 4 : a+=k[0]; break; - case 3 : a+=k[0]&0xffffff00; break; - case 2 : a+=k[0]&0xffff0000; break; - case 1 : a+=k[0]&0xff000000; break; - case 0 : return c; /* zero length strings require no mixing */ - } - -#else /* make valgrind happy */ - - k8 = (const uint8_t *)k; - switch(length) /* all the case statements fall through */ - { - case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ - case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ - case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ - case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ - case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ - case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ - case 4 : a+=k[0]; break; - case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ - case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ - case 1 : a+=((uint32_t)k8[0])<<24; break; - case 0 : return c; - } -#endif /* !VALGRIND */ - - } else { /* need to read the key one byte at a time */ - const uint8_t *k = key; - - /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ - while (length > 12) - { - a += ((uint32_t)k[0])<<24; - a += ((uint32_t)k[1])<<16; - a += ((uint32_t)k[2])<<8; - a += ((uint32_t)k[3]); - b += ((uint32_t)k[4])<<24; - b += ((uint32_t)k[5])<<16; - b += ((uint32_t)k[6])<<8; - b += ((uint32_t)k[7]); - c += ((uint32_t)k[8])<<24; - c += ((uint32_t)k[9])<<16; - c += ((uint32_t)k[10])<<8; - c += ((uint32_t)k[11]); - mix(a,b,c); - length -= 12; - k += 12; - } - - /*-------------------------------- last block: affect all 32 bits of (c) */ - switch(length) /* all the case statements fall through */ - { - case 12: c+=k[11]; - case 11: c+=((uint32_t)k[10])<<8; - case 10: c+=((uint32_t)k[9])<<16; - case 9 : c+=((uint32_t)k[8])<<24; - case 8 : b+=k[7]; - case 7 : b+=((uint32_t)k[6])<<8; - case 6 : b+=((uint32_t)k[5])<<16; - case 5 : b+=((uint32_t)k[4])<<24; - case 4 : a+=k[3]; - case 3 : a+=((uint32_t)k[2])<<8; - case 2 : a+=((uint32_t)k[1])<<16; - case 1 : a+=((uint32_t)k[0])<<24; - break; - case 0 : return c; - } - } - - final(a,b,c); - return c; +#include "memcached.h" +#include "jenkins_hash.h" +#include "murmur3_hash.h" + +int hash_init(enum hashfunc_type type) { + switch(type) { + case JENKINS_HASH: + hash = jenkins_hash; + settings.hash_algorithm = "jenkins"; + break; + case MURMUR3_HASH: + hash = MurmurHash3_x86_32; + settings.hash_algorithm = "murmur3"; + break; + default: + return -1; + } + return 0; } -#else /* HASH_XXX_ENDIAN == 1 */ -#error Must define HASH_BIG_ENDIAN or HASH_LITTLE_ENDIAN -#endif /* HASH_XXX_ENDIAN == 1 */ diff --git a/hash.h b/hash.h index aa02c58fbe..059d1e2a67 100644 --- a/hash.h +++ b/hash.h @@ -1,15 +1,14 @@ #ifndef HASH_H #define HASH_H -#ifdef __cplusplus -extern "C" { -#endif +typedef uint32_t (*hash_func)(const void *key, size_t length); +hash_func hash; -uint32_t hash(const void *key, size_t length, const uint32_t initval); +enum hashfunc_type { + JENKINS_HASH=0, MURMUR3_HASH +}; -#ifdef __cplusplus -} -#endif +int hash_init(enum hashfunc_type type); #endif 
/* HASH_H */ diff --git a/items.c b/items.c index 91ac6caec8..ff661d0ba0 100644 --- a/items.c +++ b/items.c @@ -112,7 +112,7 @@ item *do_item_alloc(char *key, const size_t nkey, const int flags, /* We walk up *only* for locked items. Never searching for expired. * Waste of CPU for almost all deployments */ for (; tries > 0 && search != NULL; tries--, search=search->prev) { - uint32_t hv = hash(ITEM_key(search), search->nkey, 0); + uint32_t hv = hash(ITEM_key(search), search->nkey); /* Attempt to hash item lock the "search" item. If locked, no * other callers can incr the refcount */ @@ -603,7 +603,7 @@ void do_item_flush_expired(void) { if (iter->time >= settings.oldest_live) { next = iter->next; if ((iter->it_flags & ITEM_SLABBED) == 0) { - do_item_unlink_nolock(iter, hash(ITEM_key(iter), iter->nkey, 0)); + do_item_unlink_nolock(iter, hash(ITEM_key(iter), iter->nkey)); } } else { /* We've hit the first old item. Continue to the next queue. */ diff --git a/jenkins_hash.c b/jenkins_hash.c new file mode 100644 index 0000000000..b60cff48ae --- /dev/null +++ b/jenkins_hash.c @@ -0,0 +1,431 @@ +/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +/* + * Hash table + * + * The hash function used here is by Bob Jenkins, 1996: + * + * "By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. + * You may use this code any way you wish, private, educational, + * or commercial. It's free." + * + */ +#include "memcached.h" +#include "jenkins_hash.h" + +/* + * Since the hash function does bit manipulation, it needs to know + * whether it's big or little-endian. ENDIAN_LITTLE and ENDIAN_BIG + * are set in the configure script. + */ +#if ENDIAN_BIG == 1 +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 1 +#else +# if ENDIAN_LITTLE == 1 +# define HASH_LITTLE_ENDIAN 1 +# define HASH_BIG_ENDIAN 0 +# else +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 0 +# endif +#endif + +#define rot(x,k) (((x)<<(k)) ^ ((x)>>(32-(k)))) + +/* +------------------------------------------------------------------------------- +mix -- mix 3 32-bit values reversibly. + +This is reversible, so any information in (a,b,c) before mix() is +still in (a,b,c) after mix(). + +If four pairs of (a,b,c) inputs are run through mix(), or through +mix() in reverse, there are at least 32 bits of the output that +are sometimes the same for one pair and different for another pair. +This was tested for: +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that +satisfy this are + 4 6 8 16 19 4 + 9 15 3 18 27 15 + 14 9 3 7 17 3 +Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing +for "differ" defined as + with a one-bit base and a two-bit delta. I +used http://burtleburtle.net/bob/hash/avalanche.html to choose +the operations, constants, and arrangements of the variables. + +This does not achieve avalanche. There are input bits of (a,b,c) +that fail to affect some output bits of (a,b,c), especially of a. The +most thoroughly mixed value is c, but it doesn't really even achieve +avalanche in c. + +This allows some parallelism. 
Read-after-writes are good at doubling +the number of bits affected, so the goal of mixing pulls in the opposite +direction as the goal of parallelism. I did what I could. Rotates +seem to cost as much as shifts on every machine I could lay my hands +on, and rotates are much kinder to the top and bottom bits, so I used +rotates. +------------------------------------------------------------------------------- +*/ +#define mix(a,b,c) \ +{ \ + a -= c; a ^= rot(c, 4); c += b; \ + b -= a; b ^= rot(a, 6); a += c; \ + c -= b; c ^= rot(b, 8); b += a; \ + a -= c; a ^= rot(c,16); c += b; \ + b -= a; b ^= rot(a,19); a += c; \ + c -= b; c ^= rot(b, 4); b += a; \ +} + +/* +------------------------------------------------------------------------------- +final -- final mixing of 3 32-bit values (a,b,c) into c + +Pairs of (a,b,c) values differing in only a few bits will usually +produce values of c that look totally different. This was tested for +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +These constants passed: + 14 11 25 16 4 14 24 + 12 14 25 16 4 14 24 +and these came close: + 4 8 15 26 3 22 24 + 10 8 15 26 3 22 24 + 11 8 15 26 3 22 24 +------------------------------------------------------------------------------- +*/ +#define final(a,b,c) \ +{ \ + c ^= b; c -= rot(b,14); \ + a ^= c; a -= rot(c,11); \ + b ^= a; b -= rot(a,25); \ + c ^= b; c -= rot(b,16); \ + a ^= c; a -= rot(c,4); \ + b ^= a; b -= rot(a,14); \ + c ^= b; c -= rot(b,24); \ +} + +#if HASH_LITTLE_ENDIAN == 1 +uint32_t jenkins_hash( + const void *key, /* the key to hash */ + size_t length) /* length of the key */ +{ + uint32_t a,b,c; /* internal state */ + union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + 0; + + u.ptr = key; + if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif /* ifdef VALGRIND */ + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). 
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* @fallthrough */ + case 10: c+=k[4]; /* @fallthrough@ */ + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* @fallthrough */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* @fallthrough */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* @fallthrough */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* @fallthrough */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : return c; /* zero length strings require no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : b+=((uint32_t)k[7])<<24; + case 7 : 
b+=((uint32_t)k[6])<<16; + case 6 : b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : return c; /* zero length strings require no mixing */ + } + } + + final(a,b,c); + return c; /* zero length strings require no mixing */ +} + +#elif HASH_BIG_ENDIAN == 1 +/* + * hashbig(): + * This is the same as hashword() on big-endian machines. It is different + * from hashlittle() on all machines. hashbig() takes advantage of + * big-endian byte ordering. + */ +uint32_t jenkins_hash( const void *key, size_t length) +{ + uint32_t a,b,c; + union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + 0; + + u.ptr = key; + if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif /* ifdef VALGRIND */ + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]<<8" actually reads beyond the end of the string, but + * then shifts out the part it's not allowed to read. Because the + * string is aligned, the illegal read is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). + */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; + case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; + case 5 : b+=k[1]&0xff000000; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff00; break; + case 2 : a+=k[0]&0xffff0000; break; + case 1 : a+=k[0]&0xff000000; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ + case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ + case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ + case 1 : a+=((uint32_t)k8[0])<<24; break; + case 0 : return c; + } + +#endif /* !VALGRIND */ + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += ((uint32_t)k[0])<<24; + a += ((uint32_t)k[1])<<16; + a += ((uint32_t)k[2])<<8; + a += ((uint32_t)k[3]); + b += ((uint32_t)k[4])<<24; + b += ((uint32_t)k[5])<<16; + b += 
((uint32_t)k[6])<<8; + b += ((uint32_t)k[7]); + c += ((uint32_t)k[8])<<24; + c += ((uint32_t)k[9])<<16; + c += ((uint32_t)k[10])<<8; + c += ((uint32_t)k[11]); + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[11]; + case 11: c+=((uint32_t)k[10])<<8; + case 10: c+=((uint32_t)k[9])<<16; + case 9 : c+=((uint32_t)k[8])<<24; + case 8 : b+=k[7]; + case 7 : b+=((uint32_t)k[6])<<8; + case 6 : b+=((uint32_t)k[5])<<16; + case 5 : b+=((uint32_t)k[4])<<24; + case 4 : a+=k[3]; + case 3 : a+=((uint32_t)k[2])<<8; + case 2 : a+=((uint32_t)k[1])<<16; + case 1 : a+=((uint32_t)k[0])<<24; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} +#else /* HASH_XXX_ENDIAN == 1 */ +#error Must define HASH_BIG_ENDIAN or HASH_LITTLE_ENDIAN +#endif /* HASH_XXX_ENDIAN == 1 */ diff --git a/jenkins_hash.h b/jenkins_hash.h new file mode 100644 index 0000000000..b44fba1eef --- /dev/null +++ b/jenkins_hash.h @@ -0,0 +1,15 @@ +#ifndef JENKINS_HASH_H +#define JENKINS_HASH_H + +#ifdef __cplusplus +extern "C" { +#endif + +uint32_t jenkins_hash(const void *key, size_t length); + +#ifdef __cplusplus +} +#endif + +#endif /* JENKINS_HASH_H */ + diff --git a/memcached.c b/memcached.c index 7a1749889e..d6bf14f092 100644 --- a/memcached.c +++ b/memcached.c @@ -2651,6 +2651,7 @@ static void process_stat_settings(ADD_STAT add_stats, void *c) { APPEND_STAT("slab_automove", "%d", settings.slab_automove); APPEND_STAT("tail_repair_time", "%d", settings.tail_repair_time); APPEND_STAT("flush_enabled", "%s", settings.flush_enabled ? "yes" : "no"); + APPEND_STAT("hash_algorithm", "%s", settings.hash_algorithm); } static void conn_to_str(const conn *c, char *buf) { @@ -4725,6 +4726,8 @@ static void usage(void) { " - tail_repair_time: Time in seconds that indicates how long to wait before\n" " forcefully taking over the LRU tail item whose refcount has leaked.\n" " The default is 3 hours.\n" + " - hash_algorithm: The hash table algorithm\n" + " default is jenkins hash. 
options: jenkins, murmur3\n" ); return; } @@ -4953,6 +4956,7 @@ int main (int argc, char **argv) { bool protocol_specified = false; bool tcp_specified = false; bool udp_specified = false; + enum hashfunc_type hash_type = JENKINS_HASH; char *subopts; char *subopts_value; @@ -4961,7 +4965,8 @@ int main (int argc, char **argv) { HASHPOWER_INIT, SLAB_REASSIGN, SLAB_AUTOMOVE, - TAIL_REPAIR_TIME + TAIL_REPAIR_TIME, + HASH_ALGORITHM }; char *const subopts_tokens[] = { [MAXCONNS_FAST] = "maxconns_fast", @@ -4969,6 +4974,7 @@ int main (int argc, char **argv) { [SLAB_REASSIGN] = "slab_reassign", [SLAB_AUTOMOVE] = "slab_automove", [TAIL_REPAIR_TIME] = "tail_repair_time", + [HASH_ALGORITHM] = "hash_algorithm", NULL }; @@ -5255,6 +5261,20 @@ int main (int argc, char **argv) { return 1; } break; + case HASH_ALGORITHM: + if (subopts_value == NULL) { + fprintf(stderr, "Missing hash_algorithm argument\n"); + return 1; + }; + if (strcmp(subopts_value, "jenkins") == 0) { + hash_type = JENKINS_HASH; + } else if (strcmp(subopts_value, "murmur3") == 0) { + hash_type = MURMUR3_HASH; + } else { + fprintf(stderr, "Unknown hash_algorithm option (jenkins, murmur3)\n"); + return 1; + } + break; default: printf("Illegal suboption \"%s\"\n", subopts_value); return 1; @@ -5268,6 +5288,11 @@ int main (int argc, char **argv) { } } + if (hash_init(hash_type) != 0) { + fprintf(stderr, "Failed to initialize hash_algorithm!\n"); + exit(EX_USAGE); + } + /* * Use one workerthread to serve each UDP port if the user specified * multiple ports diff --git a/memcached.h b/memcached.h index 59c651b7f3..fab540a07a 100644 --- a/memcached.h +++ b/memcached.h @@ -312,6 +312,7 @@ struct settings { bool shutdown_command; /* allow shutdown command */ int tail_repair_time; /* LRU tail refcount leak repair time */ bool flush_enabled; /* flush_all enabled */ + char *hash_algorithm; /* Hash algorithm in use */ }; extern struct stats stats; diff --git a/murmur3_hash.c b/murmur3_hash.c new file mode 100644 index 0000000000..4aa4f93827 --- /dev/null +++ b/murmur3_hash.c @@ -0,0 +1,124 @@ +//----------------------------------------------------------------------------- +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +// Note - The x86 and x64 versions do _not_ produce the same results, as the +// algorithms are optimized for their respective platforms. You can still +// compile and run any of them on any platform, but your performance with the +// non-native version will be less than optimal. 
+ +#include "murmur3_hash.h" + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) + +#define FORCE_INLINE __forceinline + +#include + +#define ROTL32(x,y) _rotl(x,y) + +#define BIG_CONSTANT(x) (x) + +// Other compilers + +#else // defined(_MSC_VER) + +#define FORCE_INLINE inline __attribute__((always_inline)) + +static inline uint32_t rotl32 ( uint32_t x, int8_t r ) +{ + return (x << r) | (x >> (32 - r)); +} + +#define ROTL32(x,y) rotl32(x,y) + +#define BIG_CONSTANT(x) (x##LLU) + +#endif // !defined(_MSC_VER) + +//----------------------------------------------------------------------------- +// Block read - if your platform needs to do endian-swapping or can only +// handle aligned reads, do the conversion here + +static FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) +{ + return p[i]; +} + +//----------------------------------------------------------------------------- +// Finalization mix - force all bits of a hash block to avalanche + +static FORCE_INLINE uint32_t fmix32 ( uint32_t h ) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +//----------------------------------------------------------------------------- + +/* Defintion modified slightly from the public domain interface (no seed + + * return value */ +uint32_t MurmurHash3_x86_32 ( const void * key, size_t length) +{ + const uint8_t * data = (const uint8_t*)key; + const int nblocks = length / 4; + + uint32_t h1 = 0; + + uint32_t c1 = 0xcc9e2d51; + uint32_t c2 = 0x1b873593; + + //---------- + // body + + const uint32_t * blocks = (const uint32_t *)(data + nblocks*4); + + for(int i = -nblocks; i; i++) + { + uint32_t k1 = getblock32(blocks,i); + + k1 *= c1; + k1 = ROTL32(k1,15); + k1 *= c2; + + h1 ^= k1; + h1 = ROTL32(h1,13); + h1 = h1*5+0xe6546b64; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks*4); + + uint32_t k1 = 0; + + switch(length & 3) + { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; + k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= length; + + h1 = fmix32(h1); + + //*(uint32_t*)out = h1; + return h1; +} + diff --git a/murmur3_hash.h b/murmur3_hash.h new file mode 100644 index 0000000000..e2820e1934 --- /dev/null +++ b/murmur3_hash.h @@ -0,0 +1,19 @@ +//----------------------------------------------------------------------------- +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. 
+ +#ifndef MURMURHASH3_H +#define MURMURHASH3_H + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros +#include <stdint.h> +#include <stddef.h> + +//----------------------------------------------------------------------------- + +uint32_t MurmurHash3_x86_32(const void *key, size_t length); + +//----------------------------------------------------------------------------- + +#endif // MURMURHASH3_H diff --git a/slabs.c b/slabs.c index 8ad0bf52ef..fb140bf258 100644 --- a/slabs.c +++ b/slabs.c @@ -520,7 +520,7 @@ static int slab_rebalance_move(void) { status = MOVE_PASS; if (it->slabs_clsid != 255) { void *hold_lock = NULL; - uint32_t hv = hash(ITEM_key(it), it->nkey, 0); + uint32_t hv = hash(ITEM_key(it), it->nkey); if ((hold_lock = item_trylock(hv)) == NULL) { status = MOVE_LOCKED; } else { diff --git a/stats.c b/stats.c index 5e6b0406c1..2055f6996d 100644 --- a/stats.c +++ b/stats.c @@ -83,7 +83,7 @@ static PREFIX_STATS *stats_prefix_find(const char *key, const size_t nkey) { return NULL; } - hashval = hash(key, length, 0) % PREFIX_HASH_SIZE; + hashval = hash(key, length) % PREFIX_HASH_SIZE; for (pfs = prefix_stats[hashval]; NULL != pfs; pfs = pfs->next) { if (strncmp(pfs->prefix, key, length) == 0) @@ -295,7 +295,7 @@ static void test_prefix_record_set() { } static void test_prefix_dump() { - int hashval = hash("abc", 3, 0) % PREFIX_HASH_SIZE; + int hashval = hash("abc", 3) % PREFIX_HASH_SIZE; char tmp[500]; char *expected; int keynum; @@ -331,7 +331,7 @@ static void test_prefix_dump() { /* Find a key that hashes to the same bucket as "abc" */ for (keynum = 0; keynum < PREFIX_HASH_SIZE * 100; keynum++) { snprintf(tmp, sizeof(tmp), "%d", keynum); - if (hashval == hash(tmp, strlen(tmp), 0) % PREFIX_HASH_SIZE) { + if (hashval == hash(tmp, strlen(tmp)) % PREFIX_HASH_SIZE) { break; } } diff --git a/t/binary.t b/t/binary.t index 1b6d8c8fb2..05fdcdb935 100755 --- a/t/binary.t +++ b/t/binary.t @@ -2,7 +2,7 @@ use strict; use warnings; -use Test::More tests => 3579; +use Test::More tests => 3582; use FindBin qw($Bin); use lib "$Bin/lib"; use MemcachedTest; diff --git a/thread.c b/thread.c index 80cbac3374..ec8264bb84 100644 --- a/thread.c +++ b/thread.c @@ -502,7 +502,7 @@ item *item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int nbyt item *item_get(const char *key, const size_t nkey) { item *it; uint32_t hv; - hv = hash(key, nkey, 0); + hv = hash(key, nkey); item_lock(hv); it = do_item_get(key, nkey, hv); item_unlock(hv); @@ -512,7 +512,7 @@ item *item_get(const char *key, const size_t nkey) { item *item_touch(const char *key, size_t nkey, uint32_t exptime) { item *it; uint32_t hv; - hv = hash(key, nkey, 0); + hv = hash(key, nkey); item_lock(hv); it = do_item_touch(key, nkey, exptime, hv); item_unlock(hv); @@ -526,7 +526,7 @@ int item_link(item *item) { int ret; uint32_t hv; - hv = hash(ITEM_key(item), item->nkey, 0); + hv = hash(ITEM_key(item), item->nkey); item_lock(hv); ret = do_item_link(item, hv); item_unlock(hv); @@ -539,7 +539,7 @@ int item_link(item *item) { */ void item_remove(item *item) { uint32_t hv; - hv = hash(ITEM_key(item), item->nkey, 0); + hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_remove(item); @@ -560,7 +560,7 @@ int item_replace(item *old_it, item *new_it, const uint32_t hv) { */ void item_unlink(item *item) { uint32_t hv; - hv = hash(ITEM_key(item), item->nkey, 0); + hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_unlink(item, hv); item_unlock(hv); @@ -571,7 +571,7 @@ void 
item_unlink(item *item) { */ void item_update(item *item) { uint32_t hv; - hv = hash(ITEM_key(item), item->nkey, 0); + hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_update(item); @@ -588,7 +588,7 @@ enum delta_result_type add_delta(conn *c, const char *key, enum delta_result_type ret; uint32_t hv; - hv = hash(key, nkey, 0); + hv = hash(key, nkey); item_lock(hv); ret = do_add_delta(c, key, nkey, incr, delta, buf, cas, hv); item_unlock(hv); @@ -602,7 +602,7 @@ enum store_item_type store_item(item *item, int comm, conn* c) { enum store_item_type ret; uint32_t hv; - hv = hash(ITEM_key(item), item->nkey, 0); + hv = hash(ITEM_key(item), item->nkey); item_lock(hv); ret = do_store_item(item, comm, c, hv); item_unlock(hv);
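
Note on the mechanism, separate from the patch itself: the change replaces the fixed hash(key, length, initval) entry point with a hash_func pointer that hash_init() binds exactly once at startup, before any worker threads run, which is why every call site in the diff simply drops the old trailing 0 argument. Below is a minimal, self-contained C sketch of that dispatch pattern; the two stub hash bodies are placeholders invented for illustration, not the real jenkins_hash() or MurmurHash3_x86_32().

#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Same shape as hash.h in the patch: one function pointer, bound once. */
typedef uint32_t (*hash_func)(const void *key, size_t length);
static hash_func hash;

enum hashfunc_type { JENKINS_HASH = 0, MURMUR3_HASH };

/* Placeholder hashes so this sketch runs stand-alone; the patch binds
 * jenkins_hash() and MurmurHash3_x86_32() here instead. */
static uint32_t stub_jenkins(const void *key, size_t length) {
    uint32_t h = 0xdeadbeef;
    for (size_t i = 0; i < length; i++)
        h = (h << 5) + h + ((const uint8_t *)key)[i];
    return h;
}

static uint32_t stub_murmur3(const void *key, size_t length) {
    uint32_t h = 0;
    for (size_t i = 0; i < length; i++)
        h = (h ^ ((const uint8_t *)key)[i]) * 0x5bd1e995;
    return h;
}

/* Mirrors hash_init() in the new hash.c: select the function once and
 * reject unknown types so the caller can bail out at startup. */
static int hash_init(enum hashfunc_type type) {
    switch (type) {
    case JENKINS_HASH: hash = stub_jenkins; return 0;
    case MURMUR3_HASH: hash = stub_murmur3; return 0;
    default: return -1;
    }
}

int main(void) {
    if (hash_init(MURMUR3_HASH) != 0)
        return 1;
    const char *key = "some:key";
    /* Call sites look exactly like the rewritten ones in thread.c:
     * hv = hash(key, nkey); with no initval argument anymore. */
    printf("hv = %" PRIu32 "\n", hash(key, strlen(key)));
    return 0;
}

With the patch applied, starting the server with -o hash_algorithm=murmur3 selects MurmurHash3_x86_32, and the choice is visible at runtime via the new "stats settings" line, e.g. STAT hash_algorithm murmur3.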