c++: fix -Wimplicit-fallthrough
awesomized/libmemcached: libhashkit/murmur3.cc
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

#include "libhashkit/hashkitcon.h"

#include "libhashkit/murmur3.h"

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

#ifdef __GNUC__
#define FORCE_INLINE __attribute__((always_inline)) inline
#else
#define FORCE_INLINE inline
#endif

static FORCE_INLINE uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}
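
// Note: every call site below uses a fixed, nonzero rotation count, so the
// (width - r) shift above never becomes an out-of-range shift.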

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

#define getblock(p, i) (p[i])
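
// A sketch of what such a conversion could look like on a big-endian or
// strict-alignment target (hypothetical, not used here; this build assumes
// little-endian, alignment-tolerant reads):
//
//   static FORCE_INLINE uint32_t getblock32_safe(const uint8_t *p, int i)
//   {
//     const uint8_t *b = p + i * 4;
//     return (uint32_t)b[0] | ((uint32_t)b[1] << 8)
//          | ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
//   }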

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

static FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}
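
// As an aside, fmix32 is a full avalanche step on its own: flipping any single
// input bit is expected to flip about half of the output bits. A hypothetical
// standalone use, e.g. bucketing 32-bit keys:
//
//   uint32_t bucket = fmix32(key32) & (nbuckets - 1); // nbuckets a power of 2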

//----------

static FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  int i;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

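  // blocks points one past the last full block, so the loop below walks the
  // body with negative indices, from -nblocks up to -1.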
  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

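  // The switch here and in the two 128-bit variants below falls through on
  // purpose; the /* fall through */ comments are what silence GCC/Clang's
  // -Wimplicit-fallthrough.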
  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16; /* fall through */
  case 2: k1 ^= tail[1] << 8; /* fall through */
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix32(h1);

  *(uint32_t*)out = h1;
}
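
// Usage sketch (hypothetical caller, not part of this file):
//
//   uint32_t hash;
//   MurmurHash3_x86_32("example key", 11, 0 /* seed */, &hash);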

//-----------------------------------------------------------------------------

void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

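  // Up to 15 trailing bytes: each group of cases fills one k-lane a byte at a
  // time, finalizes that lane, then falls through into the next lower lane.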
  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16; /* fall through */
  case 14: k4 ^= tail[13] << 8; /* fall through */
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
           /* fall through */
  case 12: k3 ^= tail[11] << 24; /* fall through */
  case 11: k3 ^= tail[10] << 16; /* fall through */
  case 10: k3 ^= tail[ 9] << 8; /* fall through */
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
           /* fall through */
  case  8: k2 ^= tail[ 7] << 24; /* fall through */
  case  7: k2 ^= tail[ 6] << 16; /* fall through */
  case  6: k2 ^= tail[ 5] << 8; /* fall through */
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
           /* fall through */
  case  4: k1 ^= tail[ 3] << 24; /* fall through */
  case  3: k1 ^= tail[ 2] << 16; /* fall through */
  case  2: k1 ^= tail[ 1] << 8; /* fall through */
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}

//-----------------------------------------------------------------------------

void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

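  // Same idea with two 64-bit lanes: tail bytes 9..15 accumulate into k2,
  // bytes 1..8 into k1. The casts widen each byte before it is shifted.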
  switch(len & 15)
  {
  case 15: k2 ^= (uint64_t)(tail[14]) << 48; /* fall through */
  case 14: k2 ^= (uint64_t)(tail[13]) << 40; /* fall through */
  case 13: k2 ^= (uint64_t)(tail[12]) << 32; /* fall through */
  case 12: k2 ^= (uint64_t)(tail[11]) << 24; /* fall through */
  case 11: k2 ^= (uint64_t)(tail[10]) << 16; /* fall through */
  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8; /* fall through */
  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
           /* fall through */
  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56; /* fall through */
  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48; /* fall through */
  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40; /* fall through */
  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32; /* fall through */
  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24; /* fall through */
  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16; /* fall through */
  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8; /* fall through */
  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix64(h1);
  h2 = fmix64(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
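
// Usage sketch (hypothetical caller, not part of this file); out must point
// to 16 writable bytes:
//
//   uint64_t hash[2];
//   MurmurHash3_x64_128("example key", 11, 0 /* seed */, hash);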

//-----------------------------------------------------------------------------