/*
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 * http://burtleburtle.net/bob/hash/index.html
 *
 * Modified by Brian Pontz for libmemcached
 * TODO:
 * Add big endian support
 */

#include <stdint.h>    /* uint8_t, uint16_t, uint32_t */
#include <stddef.h>    /* size_t */
#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
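
/*
 * mix() -- mix three 32-bit state values reversibly.  rot() above is a
 * 32-bit left rotate.  After mixing, every bit of (a,b,c) depends on the
 * bits accumulated from the key so far.
 */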
#define mix(a,b,c) \
{ \
  a -= c;  a ^= rot(c, 4);  c += b; \
  b -= a;  b ^= rot(a, 6);  a += c; \
  c -= b;  c ^= rot(b, 8);  b += a; \
  a -= c;  a ^= rot(c,16);  c += b; \
  b -= a;  b ^= rot(a,19);  a += c; \
  c -= b;  c ^= rot(b, 4);  b += a; \
}
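
/*
 * final() -- final mixing of three 32-bit values (a,b,c) into c.  Pairs of
 * (a,b,c) values differing in only a few bits will usually produce values
 * of c that look totally different.
 */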
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}
/*
jenkins_hash() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  initval : can be any 4-byte value
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Two keys differing by one or two bits will have
totally different hash values.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
*/
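
/*
 * Usage sketch (illustration only; the names below are not part of this
 * file): hash a C string, then reduce the result to a 10-bit bucket index
 * with hashmask(), as the note above suggests.  The seed of 0 is an
 * arbitrary choice.
 *
 *   const char *s = "example";
 *   uint32_t h = jenkins_hash(s, strlen(s), 0);
 *   uint32_t bucket = h & hashmask(10);  // table of hashsize(10) == 1024 slots
 */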
uint32_t jenkins_hash(const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;                                          /* internal state */
  union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */
  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
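  /*
   * The union lets the code inspect the pointer's low bits without casting
   * a pointer to an integer directly; the alignment tests below pick the
   * widest read (32-bit, 16-bit, or byte-wise) the address allows.
   */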
#ifdef BYTEORDER_LITTLE_ENDIAN
  if ((u.i & 0x3) == 0)
  {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */
    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }
    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */
    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff; a+=k[0]; break;
    case 5 : b+=k[1]&0xff; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff; break;
    case 2 : a+=k[0]&0xffff; break;
    case 1 : a+=k[0]&0xff; break;
    case 0 : return c;              /* zero length strings require no mixing */
    }
  }
  else if ((u.i & 0x1) == 0)
  {
    const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
    const uint8_t  *k8;
    /*--------------- all but last block: aligned reads and different mixing */
    while (length > 12)
    {
      a += k[0] + (((uint32_t)k[1])<<16);
      b += k[2] + (((uint32_t)k[3])<<16);
      c += k[4] + (((uint32_t)k[5])<<16);
      mix(a,b,c);
      length -= 12;
      k += 6;
    }
    /*----------------------------- handle the last (probably partial) block */
    k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[4]+(((uint32_t)k[5])<<16);
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 11: c+=((uint32_t)k8[10])<<16;     /* fall through */
    case 10: c+=k[4];
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 9 : c+=k8[8];                      /* fall through */
    case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 7 : b+=((uint32_t)k8[6])<<16;      /* fall through */
    case 6 : b+=k[2];
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 5 : b+=k8[4];                      /* fall through */
    case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 3 : a+=((uint32_t)k8[2])<<16;      /* fall through */
    case 2 : a+=k[0];
             break;
    case 1 : a+=k8[0];
             break;
    case 0 : return c;                     /* zero length requires no mixing */
    }
  }
  else
  {                        /* need to read the key one byte at a time */
#endif /* little endian */
    const uint8_t *k = (const uint8_t *)key;
    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      a += ((uint32_t)k[1])<<8;
      a += ((uint32_t)k[2])<<16;
      a += ((uint32_t)k[3])<<24;
      b += k[4];
      b += ((uint32_t)k[5])<<8;
      b += ((uint32_t)k[6])<<16;
      b += ((uint32_t)k[7])<<24;
      c += k[8];
      c += ((uint32_t)k[9])<<8;
      c += ((uint32_t)k[10])<<16;
      c += ((uint32_t)k[11])<<24;
      mix(a,b,c);
      length -= 12;
      k += 12;
    }
    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=((uint32_t)k[11])<<24;
    case 11: c+=((uint32_t)k[10])<<16;
    case 10: c+=((uint32_t)k[9])<<8;
    case 9 : c+=k[8];
    case 8 : b+=((uint32_t)k[7])<<24;
    case 7 : b+=((uint32_t)k[6])<<16;
    case 6 : b+=((uint32_t)k[5])<<8;
    case 5 : b+=k[4];
    case 4 : a+=((uint32_t)k[3])<<24;
    case 3 : a+=((uint32_t)k[2])<<16;
    case 2 : a+=((uint32_t)k[1])<<8;
    case 1 : a+=k[0];
             break;
    case 0 : return c;
    }
#ifdef BYTEORDER_LITTLE_ENDIAN
  }
#endif

  final(a,b,c);
  return c;
}
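
/*
 * Minimal smoke test, as a sketch: JENKINS_HASH_SELFTEST is a hypothetical
 * guard, not a macro used by libmemcached.  The zero-length expectation
 * follows from the state setup above (0xdeadbeef + length + initval with
 * length == initval == 0, returned unmixed for empty keys); the second
 * assertion only checks that the hash is deterministic.
 */
#ifdef JENKINS_HASH_SELFTEST
#include <assert.h>
#include <string.h>

int main(void)
{
  const char *s = "example key";

  assert(jenkins_hash("", 0, 0) == 0xdeadbeefU);               /* empty key */
  assert(jenkins_hash(s, strlen(s), 7) ==
         jenkins_hash(s, strlen(s), 7));                    /* deterministic */
  return 0;
}
#endif /* JENKINS_HASH_SELFTEST */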