//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
#include "libhashkit/hashkitcon.h"

#include "libhashkit/murmur3.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// NOTE(review): the two FORCE_INLINE definitions appeared back to back with no
// conditional (a redefinition); the compiler guard was evidently lost, so it is
// restored here: GCC/Clang honor always_inline, others fall back to plain inline.
#if defined(__GNUC__)
#define FORCE_INLINE __attribute__((always_inline)) inline
#else
#define FORCE_INLINE inline
#endif
23 static FORCE_INLINE
uint32_t rotl32 ( uint32_t x
, int8_t r
)
25 return (x
<< r
) | (x
>> (32 - r
));
28 static FORCE_INLINE
uint64_t rotl64 ( uint64_t x
, int8_t r
)
30 return (x
<< r
) | (x
>> (64 - r
));
// Rotation wrappers — kept as macros so the names match the reference
// MurmurHash3 implementation (which maps them to compiler intrinsics on MSVC).
#define ROTL32(x, y) rotl32(x, y)
#define ROTL64(x, y) rotl64(x, y)

// Force a 64-bit unsigned literal regardless of the compiler's default
// interpretation of large hex constants.
#define BIG_CONSTANT(x) (x##LLU)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

// Read block i of `blocks` as a T without assuming suitable alignment.
// On big-endian targets the bytes are assembled explicitly (so the hash
// matches the little-endian result); otherwise memcpy performs a safe,
// possibly-unaligned load that the compiler folds to a plain move.
// NOTE(review): the `template` header and the WORDS_BIGENDIAN conditional
// were missing from the corrupted text and are restored here.
template <typename T>
static inline T getblock(const T *blocks, int i) {
#ifdef WORDS_BIGENDIAN
  const uint8_t *data = ((const uint8_t *) blocks) + i * sizeof(T);
  T b = 0;
// sl(s): byte s (1-based, most significant first) shifted into position.
#define sl(s) (((T) data[s - 1]) << (8 * (sizeof(T) - s)))
  switch (sizeof(T)) {
  case 8: b |= sl(8); /* fall through */
  case 7: b |= sl(7); /* fall through */
  case 6: b |= sl(6); /* fall through */
  case 5: b |= sl(5); /* fall through */
  case 4: b |= sl(4); /* fall through */
  case 3: b |= sl(3); /* fall through */
  case 2: b |= sl(2); /* fall through */
  case 1: b |= sl(1); break;
  }
#undef sl
  return b;
#else
  T b;
  memcpy(&b, ((const uint8_t *) blocks) + i * sizeof(T), sizeof(T));
  return b;
#endif
}
68 //-----------------------------------------------------------------------------
69 // Finalization mix - force all bits of a hash block to avalanche
71 static FORCE_INLINE
uint32_t fmix32 ( uint32_t h
)
84 static FORCE_INLINE
uint64_t fmix64 ( uint64_t k
)
87 k
*= BIG_CONSTANT(0xff51afd7ed558ccd);
89 k
*= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
95 //-----------------------------------------------------------------------------
97 void MurmurHash3_x86_32 ( const void * key
, int len
,
98 uint32_t seed
, void * out
)
100 const uint8_t * data
= (const uint8_t*)key
;
101 const int nblocks
= len
/ 4;
106 uint32_t c1
= 0xcc9e2d51;
107 uint32_t c2
= 0x1b873593;
112 const uint32_t * blocks
= (const uint32_t *)(data
+ nblocks
*4);
114 for(i
= -nblocks
; i
; i
++)
116 uint32_t k1
= getblock(blocks
,i
);
124 h1
= h1
*5+0xe6546b64;
130 const uint8_t * tail
= (const uint8_t*)(data
+ nblocks
*4);
137 case 3: k1
^= tail
[2] << 8;
139 case 2: k1
^= tail
[1] << 16;
141 case 1: k1
^= tail
[0] << 24;
143 case 3: k1
^= tail
[2] << 16;
145 case 2: k1
^= tail
[1] << 8;
147 case 1: k1
^= tail
[0];
149 k1
*= c1
; k1
= ROTL32(k1
,15); k1
*= c2
; h1
^= k1
;
159 *(uint32_t*)out
= h1
;
162 //-----------------------------------------------------------------------------
164 void MurmurHash3_x86_128 ( const void * key
, const int len
,
165 uint32_t seed
, void * out
)
167 const uint8_t * data
= (const uint8_t*)key
;
168 const int nblocks
= len
/ 16;
176 uint32_t c1
= 0x239b961b;
177 uint32_t c2
= 0xab0e9789;
178 uint32_t c3
= 0x38b34ae5;
179 uint32_t c4
= 0xa1e38b93;
184 const uint32_t * blocks
= (const uint32_t *)(data
+ nblocks
*16);
186 for(i
= -nblocks
; i
; i
++)
188 uint32_t k1
= getblock(blocks
,i
*4+0);
189 uint32_t k2
= getblock(blocks
,i
*4+1);
190 uint32_t k3
= getblock(blocks
,i
*4+2);
191 uint32_t k4
= getblock(blocks
,i
*4+3);
193 k1
*= c1
; k1
= ROTL32(k1
,15); k1
*= c2
; h1
^= k1
;
195 h1
= ROTL32(h1
,19); h1
+= h2
; h1
= h1
*5+0x561ccd1b;
197 k2
*= c2
; k2
= ROTL32(k2
,16); k2
*= c3
; h2
^= k2
;
199 h2
= ROTL32(h2
,17); h2
+= h3
; h2
= h2
*5+0x0bcaa747;
201 k3
*= c3
; k3
= ROTL32(k3
,17); k3
*= c4
; h3
^= k3
;
203 h3
= ROTL32(h3
,15); h3
+= h4
; h3
= h3
*5+0x96cd1c35;
205 k4
*= c4
; k4
= ROTL32(k4
,18); k4
*= c1
; h4
^= k4
;
207 h4
= ROTL32(h4
,13); h4
+= h1
; h4
= h4
*5+0x32ac3b17;
213 const uint8_t * tail
= (const uint8_t*)(data
+ nblocks
*16);
222 case 15: k4
^= tail
[14] << 16;
224 case 14: k4
^= tail
[13] << 8;
226 case 13: k4
^= tail
[12] << 0;
227 k4
*= c4
; k4
= ROTL32(k4
,18); k4
*= c1
; h4
^= k4
;
229 case 12: k3
^= tail
[11] << 24;
231 case 11: k3
^= tail
[10] << 16;
233 case 10: k3
^= tail
[ 9] << 8;
235 case 9: k3
^= tail
[ 8] << 0;
236 k3
*= c3
; k3
= ROTL32(k3
,17); k3
*= c4
; h3
^= k3
;
238 case 8: k2
^= tail
[ 7] << 24;
240 case 7: k2
^= tail
[ 6] << 16;
242 case 6: k2
^= tail
[ 5] << 8;
244 case 5: k2
^= tail
[ 4] << 0;
245 k2
*= c2
; k2
= ROTL32(k2
,16); k2
*= c3
; h2
^= k2
;
247 case 4: k1
^= tail
[ 3] << 24;
249 case 3: k1
^= tail
[ 2] << 16;
251 case 2: k1
^= tail
[ 1] << 8;
253 case 1: k1
^= tail
[ 0] << 0;
254 k1
*= c1
; k1
= ROTL32(k1
,15); k1
*= c2
; h1
^= k1
;
260 h1
^= len
; h2
^= len
; h3
^= len
; h4
^= len
;
262 h1
+= h2
; h1
+= h3
; h1
+= h4
;
263 h2
+= h1
; h3
+= h1
; h4
+= h1
;
270 h1
+= h2
; h1
+= h3
; h1
+= h4
;
271 h2
+= h1
; h3
+= h1
; h4
+= h1
;
273 ((uint32_t*)out
)[0] = h1
;
274 ((uint32_t*)out
)[1] = h2
;
275 ((uint32_t*)out
)[2] = h3
;
276 ((uint32_t*)out
)[3] = h4
;
279 //-----------------------------------------------------------------------------
281 void MurmurHash3_x64_128 ( const void * key
, const int len
,
282 const uint32_t seed
, void * out
)
284 const uint8_t * data
= (const uint8_t*)key
;
285 const int nblocks
= len
/ 16;
291 uint64_t c1
= BIG_CONSTANT(0x87c37b91114253d5);
292 uint64_t c2
= BIG_CONSTANT(0x4cf5ad432745937f);
297 const uint64_t * blocks
= (const uint64_t *)(data
);
299 for(i
= 0; i
< nblocks
; i
++)
301 uint64_t k1
= getblock(blocks
,i
*2+0);
302 uint64_t k2
= getblock(blocks
,i
*2+1);
304 k1
*= c1
; k1
= ROTL64(k1
,31); k1
*= c2
; h1
^= k1
;
306 h1
= ROTL64(h1
,27); h1
+= h2
; h1
= h1
*5+0x52dce729;
308 k2
*= c2
; k2
= ROTL64(k2
,33); k2
*= c1
; h2
^= k2
;
310 h2
= ROTL64(h2
,31); h2
+= h1
; h2
= h2
*5+0x38495ab5;
316 const uint8_t * tail
= (const uint8_t*)(data
+ nblocks
*16);
323 case 15: k2
^= (uint64_t)(tail
[14]) << 48;
325 case 14: k2
^= (uint64_t)(tail
[13]) << 40;
327 case 13: k2
^= (uint64_t)(tail
[12]) << 32;
329 case 12: k2
^= (uint64_t)(tail
[11]) << 24;
331 case 11: k2
^= (uint64_t)(tail
[10]) << 16;
333 case 10: k2
^= (uint64_t)(tail
[ 9]) << 8;
335 case 9: k2
^= (uint64_t)(tail
[ 8]) << 0;
336 k2
*= c2
; k2
= ROTL64(k2
,33); k2
*= c1
; h2
^= k2
;
338 case 8: k1
^= (uint64_t)(tail
[ 7]) << 56;
340 case 7: k1
^= (uint64_t)(tail
[ 6]) << 48;
342 case 6: k1
^= (uint64_t)(tail
[ 5]) << 40;
344 case 5: k1
^= (uint64_t)(tail
[ 4]) << 32;
346 case 4: k1
^= (uint64_t)(tail
[ 3]) << 24;
348 case 3: k1
^= (uint64_t)(tail
[ 2]) << 16;
350 case 2: k1
^= (uint64_t)(tail
[ 1]) << 8;
352 case 1: k1
^= (uint64_t)(tail
[ 0]) << 0;
353 k1
*= c1
; k1
= ROTL64(k1
,31); k1
*= c2
; h1
^= k1
;
359 h1
^= len
; h2
^= len
;
370 ((uint64_t*)out
)[0] = h1
;
371 ((uint64_t*)out
)[1] = h2
;
//-----------------------------------------------------------------------------