cppcheck: fix warnings
[awesomized/libmemcached] / libhashkit / murmur3.cc
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
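//
// For example (a sketch, not part of this file; it assumes only the
// prototypes declared in libhashkit/murmur3.h), hashing one key with both
// 128-bit variants yields two unrelated digests:
//
//   uint32_t h_x86[4];
//   uint64_t h_x64[2];
//   MurmurHash3_x86_128("key", 3, 0, h_x86);
//   MurmurHash3_x64_128("key", 3, 0, h_x64);
//   // memcmp(h_x86, h_x64, 16) is expected to be nonzero.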

#include "libhashkit/hashkitcon.h"

#include "libhashkit/murmur3.h"

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

#ifdef __GNUC__
#define FORCE_INLINE __attribute__((always_inline)) inline
#else
#define FORCE_INLINE inline
#endif

static FORCE_INLINE uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
#include <cstring>
#include <cstddef>

template <typename T>
static inline T getblock(const T *blocks, int i) {
  T b;
  // memcpy keeps unaligned reads well-defined; the byte offset is computed
  // in signed (ptrdiff_t) arithmetic because callers pass negative i, which
  // would otherwise wrap when multiplied by the unsigned sizeof(T).
  memcpy(&b, ((const uint8_t *) blocks) + (ptrdiff_t) i * (ptrdiff_t) sizeof(T), sizeof(T));
  return b;
}
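
// getblock performs a native-endian read, so a big-endian platform produces
// different (but self-consistent) hashes than the little-endian reference
// output. If matching the reference values mattered, a swap could go right
// after the memcpy above; a sketch using GCC/Clang builtins (not wired in):
//
//   if (sizeof(T) == 4) b = (T) __builtin_bswap32((uint32_t) b);
//   if (sizeof(T) == 8) b = (T) __builtin_bswap64((uint64_t) b);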

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

static FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//----------

static FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}
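
// "Avalanche" means flipping any single input bit flips each output bit with
// probability close to 1/2. A quick empirical check (a sketch, not part of
// this file; __builtin_popcount is a GCC/Clang builtin, x is some uint32_t):
//
//   int flipped = __builtin_popcount(fmix32(x) ^ fmix32(x ^ 1u));
//   // for random x, 'flipped' should average about 16 of 32 bits.
//
// Note that fmix32(0) == 0: zero is a fixed point of the xorshift/multiply
// rounds (the seed and length mixed into h make an all-zero input unlikely).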

//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  int i;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  /* fall through */
  case 2: k1 ^= tail[1] << 8;
  /* fall through */
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix32(h1);

  *(uint32_t*)out = h1;
}
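
// Usage (a sketch; buf, buflen and seed are caller-supplied): 'out' must
// point to 4 bytes with uint32_t alignment, since the result is stored
// through a uint32_t pointer:
//
//   uint32_t h;
//   MurmurHash3_x86_32(buf, (int) buflen, seed, &h);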

//-----------------------------------------------------------------------------

void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  // the << 24 bytes are widened to uint32_t first: shifting a byte with the
  // high bit set into bit 31 of a (signed) int is undefined behavior
  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  /* fall through */
  case 14: k4 ^= tail[13] << 8;
  /* fall through */
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
  /* fall through */
  case 12: k3 ^= (uint32_t) tail[11] << 24;
  /* fall through */
  case 11: k3 ^= tail[10] << 16;
  /* fall through */
  case 10: k3 ^= tail[ 9] << 8;
  /* fall through */
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
  /* fall through */
  case  8: k2 ^= (uint32_t) tail[ 7] << 24;
  /* fall through */
  case  7: k2 ^= tail[ 6] << 16;
  /* fall through */
  case  6: k2 ^= tail[ 5] << 8;
  /* fall through */
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
  /* fall through */
  case  4: k1 ^= (uint32_t) tail[ 3] << 24;
  /* fall through */
  case  3: k1 ^= tail[ 2] << 16;
  /* fall through */
  case  2: k1 ^= tail[ 1] << 8;
  /* fall through */
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}
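
// Usage note (a sketch): the x86_128 digest is emitted as four native-endian
// uint32_t words, so 'out' must provide 16 bytes with uint32_t alignment:
//
//   uint32_t digest[4];
//   MurmurHash3_x86_128(buf, (int) buflen, seed, digest);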

//-----------------------------------------------------------------------------

void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= (uint64_t)(tail[14]) << 48;
  /* fall through */
  case 14: k2 ^= (uint64_t)(tail[13]) << 40;
  /* fall through */
  case 13: k2 ^= (uint64_t)(tail[12]) << 32;
  /* fall through */
  case 12: k2 ^= (uint64_t)(tail[11]) << 24;
  /* fall through */
  case 11: k2 ^= (uint64_t)(tail[10]) << 16;
  /* fall through */
  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
  /* fall through */
  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
  /* fall through */
  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56;
  /* fall through */
  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48;
  /* fall through */
  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40;
  /* fall through */
  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32;
  /* fall through */
  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24;
  /* fall through */
  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16;
  /* fall through */
  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;
  /* fall through */
  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix64(h1);
  h2 = fmix64(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
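
// Usage note (a sketch): here 'out' must provide 16 bytes with uint64_t
// alignment, as the digest is stored as two native-endian 64-bit words:
//
//   uint64_t digest[2];
//   MurmurHash3_x64_128(buf, (int) buflen, seed, digest);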

//-----------------------------------------------------------------------------