/* f7d21dd310c24da74a9a74163279af072b2077bb -- git object id, extraction artifact */
1 /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Hash table
 *
 * The hash function used here is by Bob Jenkins, 1996:
 *    <http://burtleburtle.net/bob/hash/doobs.html>
 *    "By Bob Jenkins, 1996.  bob_jenkins@burtleburtle.net.
 *    You may use this code any way you wish, private, educational,
 *    or commercial.  It's free."
 */
12 #include "memcached.h"
/*
 * Since the hash function does bit manipulation, it needs to know
 * whether it's big or little-endian. ENDIAN_LITTLE and ENDIAN_BIG
 * are set in the configure script.
 */
/*
 * Select byte-order flags for the hash below.  Exactly one of
 * HASH_LITTLE_ENDIAN / HASH_BIG_ENDIAN is 1 when the configure script
 * detected the endianness; both are 0 when it could not, which forces
 * the safe byte-at-a-time code path in hash().
 */
#if defined(ENDIAN_BIG) && ENDIAN_BIG == 1
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# if defined(ENDIAN_LITTLE) && ENDIAN_LITTLE == 1
#  define HASH_LITTLE_ENDIAN 1
#  define HASH_BIG_ENDIAN 0
# else
#  define HASH_LITTLE_ENDIAN 0
#  define HASH_BIG_ENDIAN 0
# endif
#endif
/* Rotate the 32-bit value x left by k bits (k must be in 1..31).  The two
 * shifted fields occupy disjoint bit ranges, so OR is equivalent to the
 * XOR used by some copies of this code. */
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.

This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().

If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) look like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
    4  6  8 16 19  4
    9 15  3 18 27 15
   14  9  3  7 17  3
Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
for "differ" defined as + with a one-bit base and a two-bit delta.  I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.

This does not achieve avalanche.  There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a.  The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.

This allows some parallelism.  Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism.  I did what I could.  Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
  a -= c;  a ^= rot(c, 4);  c += b; \
  b -= a;  b ^= rot(a, 6);  a += c; \
  c -= b;  c ^= rot(b, 8);  b += a; \
  a -= c;  a ^= rot(c,16);  c += b; \
  b -= a;  b ^= rot(a,19);  a += c; \
  c -= b;  c ^= rot(b, 4);  b += a; \
}
/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c

Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different.  This was tested for
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) look like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}
124 #if HASH_LITTLE_ENDIAN == 1
126 const void *key
, /* the key to hash */
127 size_t length
, /* length of the key */
128 const uint32_t initval
) /* initval */
130 uint32_t a
,b
,c
; /* internal state */
131 union { const void *ptr
; size_t i
; } u
; /* needed for Mac Powerbook G4 */
133 /* Set up the internal state */
134 a
= b
= c
= 0xdeadbeef + ((uint32_t)length
) + initval
;
137 if (HASH_LITTLE_ENDIAN
&& ((u
.i
& 0x3) == 0)) {
138 const uint32_t *k
= key
; /* read 32-bit chunks */
141 #endif /* ifdef VALGRIND */
143 /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
154 /*----------------------------- handle the last (probably partial) block */
156 * "k[2]&0xffffff" actually reads beyond the end of the string, but
157 * then masks off the part it's not allowed to read. Because the
158 * string is aligned, the masked-off tail is in the same word as the
159 * rest of the string. Every machine with memory protection I've seen
160 * does it on word boundaries, so is OK with this. But VALGRIND will
161 * still catch it and complain. The masking trick does make the hash
162 * noticably faster for short strings (like English words).
168 case 12: c
+=k
[2]; b
+=k
[1]; a
+=k
[0]; break;
169 case 11: c
+=k
[2]&0xffffff; b
+=k
[1]; a
+=k
[0]; break;
170 case 10: c
+=k
[2]&0xffff; b
+=k
[1]; a
+=k
[0]; break;
171 case 9 : c
+=k
[2]&0xff; b
+=k
[1]; a
+=k
[0]; break;
172 case 8 : b
+=k
[1]; a
+=k
[0]; break;
173 case 7 : b
+=k
[1]&0xffffff; a
+=k
[0]; break;
174 case 6 : b
+=k
[1]&0xffff; a
+=k
[0]; break;
175 case 5 : b
+=k
[1]&0xff; a
+=k
[0]; break;
176 case 4 : a
+=k
[0]; break;
177 case 3 : a
+=k
[0]&0xffffff; break;
178 case 2 : a
+=k
[0]&0xffff; break;
179 case 1 : a
+=k
[0]&0xff; break;
180 case 0 : return c
; /* zero length strings require no mixing */
183 #else /* make valgrind happy */
185 k8
= (const uint8_t *)k
;
188 case 12: c
+=k
[2]; b
+=k
[1]; a
+=k
[0]; break;
189 case 11: c
+=((uint32_t)k8
[10])<<16; /* fall through */
190 case 10: c
+=((uint32_t)k8
[9])<<8; /* fall through */
191 case 9 : c
+=k8
[8]; /* fall through */
192 case 8 : b
+=k
[1]; a
+=k
[0]; break;
193 case 7 : b
+=((uint32_t)k8
[6])<<16; /* fall through */
194 case 6 : b
+=((uint32_t)k8
[5])<<8; /* fall through */
195 case 5 : b
+=k8
[4]; /* fall through */
196 case 4 : a
+=k
[0]; break;
197 case 3 : a
+=((uint32_t)k8
[2])<<16; /* fall through */
198 case 2 : a
+=((uint32_t)k8
[1])<<8; /* fall through */
199 case 1 : a
+=k8
[0]; break;
200 case 0 : return c
; /* zero length strings require no mixing */
203 #endif /* !valgrind */
205 } else if (HASH_LITTLE_ENDIAN
&& ((u
.i
& 0x1) == 0)) {
206 const uint16_t *k
= key
; /* read 16-bit chunks */
209 /*--------------- all but last block: aligned reads and different mixing */
212 a
+= k
[0] + (((uint32_t)k
[1])<<16);
213 b
+= k
[2] + (((uint32_t)k
[3])<<16);
214 c
+= k
[4] + (((uint32_t)k
[5])<<16);
220 /*----------------------------- handle the last (probably partial) block */
221 k8
= (const uint8_t *)k
;
224 case 12: c
+=k
[4]+(((uint32_t)k
[5])<<16);
225 b
+=k
[2]+(((uint32_t)k
[3])<<16);
226 a
+=k
[0]+(((uint32_t)k
[1])<<16);
228 case 11: c
+=((uint32_t)k8
[10])<<16; /* @fallthrough */
229 case 10: c
+=k
[4]; /* @fallthrough@ */
230 b
+=k
[2]+(((uint32_t)k
[3])<<16);
231 a
+=k
[0]+(((uint32_t)k
[1])<<16);
233 case 9 : c
+=k8
[8]; /* @fallthrough */
234 case 8 : b
+=k
[2]+(((uint32_t)k
[3])<<16);
235 a
+=k
[0]+(((uint32_t)k
[1])<<16);
237 case 7 : b
+=((uint32_t)k8
[6])<<16; /* @fallthrough */
239 a
+=k
[0]+(((uint32_t)k
[1])<<16);
241 case 5 : b
+=k8
[4]; /* @fallthrough */
242 case 4 : a
+=k
[0]+(((uint32_t)k
[1])<<16);
244 case 3 : a
+=((uint32_t)k8
[2])<<16; /* @fallthrough */
249 case 0 : return c
; /* zero length strings require no mixing */
252 } else { /* need to read the key one byte at a time */
253 const uint8_t *k
= key
;
255 /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
259 a
+= ((uint32_t)k
[1])<<8;
260 a
+= ((uint32_t)k
[2])<<16;
261 a
+= ((uint32_t)k
[3])<<24;
263 b
+= ((uint32_t)k
[5])<<8;
264 b
+= ((uint32_t)k
[6])<<16;
265 b
+= ((uint32_t)k
[7])<<24;
267 c
+= ((uint32_t)k
[9])<<8;
268 c
+= ((uint32_t)k
[10])<<16;
269 c
+= ((uint32_t)k
[11])<<24;
275 /*-------------------------------- last block: affect all 32 bits of (c) */
276 switch(length
) /* all the case statements fall through */
278 case 12: c
+=((uint32_t)k
[11])<<24;
279 case 11: c
+=((uint32_t)k
[10])<<16;
280 case 10: c
+=((uint32_t)k
[9])<<8;
282 case 8 : b
+=((uint32_t)k
[7])<<24;
283 case 7 : b
+=((uint32_t)k
[6])<<16;
284 case 6 : b
+=((uint32_t)k
[5])<<8;
286 case 4 : a
+=((uint32_t)k
[3])<<24;
287 case 3 : a
+=((uint32_t)k
[2])<<16;
288 case 2 : a
+=((uint32_t)k
[1])<<8;
291 case 0 : return c
; /* zero length strings require no mixing */
296 return c
; /* zero length strings require no mixing */
299 #elif HASH_BIG_ENDIAN == 1
302 * This is the same as hashword() on big-endian machines. It is different
303 * from hashlittle() on all machines. hashbig() takes advantage of
304 * big-endian byte ordering.
306 uint32_t hash( const void *key
, size_t length
, const uint32_t initval
)
309 union { const void *ptr
; size_t i
; } u
; /* to cast key to (size_t) happily */
311 /* Set up the internal state */
312 a
= b
= c
= 0xdeadbeef + ((uint32_t)length
) + initval
;
315 if (HASH_BIG_ENDIAN
&& ((u
.i
& 0x3) == 0)) {
316 const uint32_t *k
= key
; /* read 32-bit chunks */
319 #endif /* ifdef VALGRIND */
321 /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
332 /*----------------------------- handle the last (probably partial) block */
334 * "k[2]<<8" actually reads beyond the end of the string, but
335 * then shifts out the part it's not allowed to read. Because the
336 * string is aligned, the illegal read is in the same word as the
337 * rest of the string. Every machine with memory protection I've seen
338 * does it on word boundaries, so is OK with this. But VALGRIND will
339 * still catch it and complain. The masking trick does make the hash
340 * noticably faster for short strings (like English words).
346 case 12: c
+=k
[2]; b
+=k
[1]; a
+=k
[0]; break;
347 case 11: c
+=k
[2]&0xffffff00; b
+=k
[1]; a
+=k
[0]; break;
348 case 10: c
+=k
[2]&0xffff0000; b
+=k
[1]; a
+=k
[0]; break;
349 case 9 : c
+=k
[2]&0xff000000; b
+=k
[1]; a
+=k
[0]; break;
350 case 8 : b
+=k
[1]; a
+=k
[0]; break;
351 case 7 : b
+=k
[1]&0xffffff00; a
+=k
[0]; break;
352 case 6 : b
+=k
[1]&0xffff0000; a
+=k
[0]; break;
353 case 5 : b
+=k
[1]&0xff000000; a
+=k
[0]; break;
354 case 4 : a
+=k
[0]; break;
355 case 3 : a
+=k
[0]&0xffffff00; break;
356 case 2 : a
+=k
[0]&0xffff0000; break;
357 case 1 : a
+=k
[0]&0xff000000; break;
358 case 0 : return c
; /* zero length strings require no mixing */
361 #else /* make valgrind happy */
363 k8
= (const uint8_t *)k
;
364 switch(length
) /* all the case statements fall through */
366 case 12: c
+=k
[2]; b
+=k
[1]; a
+=k
[0]; break;
367 case 11: c
+=((uint32_t)k8
[10])<<8; /* fall through */
368 case 10: c
+=((uint32_t)k8
[9])<<16; /* fall through */
369 case 9 : c
+=((uint32_t)k8
[8])<<24; /* fall through */
370 case 8 : b
+=k
[1]; a
+=k
[0]; break;
371 case 7 : b
+=((uint32_t)k8
[6])<<8; /* fall through */
372 case 6 : b
+=((uint32_t)k8
[5])<<16; /* fall through */
373 case 5 : b
+=((uint32_t)k8
[4])<<24; /* fall through */
374 case 4 : a
+=k
[0]; break;
375 case 3 : a
+=((uint32_t)k8
[2])<<8; /* fall through */
376 case 2 : a
+=((uint32_t)k8
[1])<<16; /* fall through */
377 case 1 : a
+=((uint32_t)k8
[0])<<24; break;
381 #endif /* !VALGRIND */
383 } else { /* need to read the key one byte at a time */
384 const uint8_t *k
= key
;
386 /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
389 a
+= ((uint32_t)k
[0])<<24;
390 a
+= ((uint32_t)k
[1])<<16;
391 a
+= ((uint32_t)k
[2])<<8;
392 a
+= ((uint32_t)k
[3]);
393 b
+= ((uint32_t)k
[4])<<24;
394 b
+= ((uint32_t)k
[5])<<16;
395 b
+= ((uint32_t)k
[6])<<8;
396 b
+= ((uint32_t)k
[7]);
397 c
+= ((uint32_t)k
[8])<<24;
398 c
+= ((uint32_t)k
[9])<<16;
399 c
+= ((uint32_t)k
[10])<<8;
400 c
+= ((uint32_t)k
[11]);
406 /*-------------------------------- last block: affect all 32 bits of (c) */
407 switch(length
) /* all the case statements fall through */
410 case 11: c
+=((uint32_t)k
[10])<<8;
411 case 10: c
+=((uint32_t)k
[9])<<16;
412 case 9 : c
+=((uint32_t)k
[8])<<24;
414 case 7 : b
+=((uint32_t)k
[6])<<8;
415 case 6 : b
+=((uint32_t)k
[5])<<16;
416 case 5 : b
+=((uint32_t)k
[4])<<24;
418 case 3 : a
+=((uint32_t)k
[2])<<8;
419 case 2 : a
+=((uint32_t)k
[1])<<16;
420 case 1 : a
+=((uint32_t)k
[0])<<24;
429 #else /* HASH_XXX_ENDIAN == 1 */
430 #error Must define HASH_BIG_ENDIAN or HASH_LITTLE_ENDIAN
431 #endif /* HASH_XXX_ENDIAN == 1 */