cmake: build&run tests (sortof)
[awesomized/libmemcached] / libhashkit / murmur3.cc
diff --git a/libhashkit/murmur3.cc b/libhashkit/murmur3.cc
index e5f06ce26b367540ef45fa101c73d312d67f5c9c..254b5090e3bf4d61450e4824d3be9bd692437949 100644
--- a/libhashkit/murmur3.cc
+++ b/libhashkit/murmur3.cc
@@ -39,7 +39,13 @@ static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-#define getblock(p, i) (p[i])
+#include <cstring>
+template <typename T>
+static inline T getblock(const T *blocks, int i) {
+  T b;
+  memcpy(&b, ((const uint8_t *) blocks) + i * sizeof(T), sizeof(T));
+  return b;
+}
 
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
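Aside: the hunk above swaps the old getblock macro, which indexed a possibly misaligned uint32_t*/uint64_t* directly, for a memcpy-based template. Dereferencing a misaligned pointer is undefined behavior in C++ and can fault on strict-alignment CPUs; copying the bytes into an aligned local is the portable idiom, and optimizing compilers reduce it to a single load on architectures that permit unaligned access. A minimal standalone sketch of the difference (the names load_cast/load_memcpy and the demo buffer are illustrative, not part of murmur3.cc):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Pattern the old macro relied on: treat an arbitrary byte offset as
    // a uint32_t* and dereference it. Undefined behavior whenever
    // (bytes + off) is not 4-byte aligned; may SIGBUS on strict-alignment
    // targets such as SPARC.
    static uint32_t load_cast(const uint8_t *bytes, size_t off) {
      return *(const uint32_t *) (bytes + off);
    }

    // Shape of the new getblock<T>: memcpy into an aligned local.
    // At -O2 this compiles to one (unaligned) load on x86 and ARM64.
    static uint32_t load_memcpy(const uint8_t *bytes, size_t off) {
      uint32_t v;
      memcpy(&v, bytes + off, sizeof v);
      return v;
    }

    int main() {
      uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      // Offset 1 is deliberately misaligned for a 4-byte read.
      printf("%08x\n", load_memcpy(buf, 1)); // 05040302 on little-endian
      (void) load_cast; // defined only to show the unsafe pattern
    }

Note that the memcpy version still reads native-endian, exactly like the macro did, so the file's comment about doing endian-swapping "here" still applies; this change addresses alignment only.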
@@ -110,7 +116,9 @@ void MurmurHash3_x86_32 ( const void * key, int len,
   switch(len & 3)
   {
   case 3: k1 ^= tail[2] << 16;
+    /* fall through */
   case 2: k1 ^= tail[1] << 8;
+    /* fall through */
   case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
   };
@@ -186,25 +194,36 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
   switch(len & 15)
   {
   case 15: k4 ^= tail[14] << 16;
+    /* fall through */
   case 14: k4 ^= tail[13] << 8;
+    /* fall through */
   case 13: k4 ^= tail[12] << 0;
            k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
-
+    /* fall through */
   case 12: k3 ^= tail[11] << 24;
+    /* fall through */
   case 11: k3 ^= tail[10] << 16;
+    /* fall through */
   case 10: k3 ^= tail[ 9] << 8;
+    /* fall through */
   case  9: k3 ^= tail[ 8] << 0;
            k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
-
+    /* fall through */
   case  8: k2 ^= tail[ 7] << 24;
+    /* fall through */
   case  7: k2 ^= tail[ 6] << 16;
+    /* fall through */
   case  6: k2 ^= tail[ 5] << 8;
+    /* fall through */
   case  5: k2 ^= tail[ 4] << 0;
            k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
-
+    /* fall through */
   case  4: k1 ^= tail[ 3] << 24;
+    /* fall through */
   case  3: k1 ^= tail[ 2] << 16;
+    /* fall through */
   case  2: k1 ^= tail[ 1] << 8;
+    /* fall through */
   case  1: k1 ^= tail[ 0] << 0;
            k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
   };
@@ -276,21 +295,34 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
   switch(len & 15)
   {
   case 15: k2 ^= (uint64_t)(tail[14]) << 48;
+    /* fall through */
   case 14: k2 ^= (uint64_t)(tail[13]) << 40;
+    /* fall through */
   case 13: k2 ^= (uint64_t)(tail[12]) << 32;
+    /* fall through */
   case 12: k2 ^= (uint64_t)(tail[11]) << 24;
+    /* fall through */
   case 11: k2 ^= (uint64_t)(tail[10]) << 16;
+    /* fall through */
   case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
+    /* fall through */
   case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
            k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
-
+    /* fall through */
   case  8: k1 ^= (uint64_t)(tail[ 7]) << 56;
+    /* fall through */
   case  7: k1 ^= (uint64_t)(tail[ 6]) << 48;
+    /* fall through */
   case  6: k1 ^= (uint64_t)(tail[ 5]) << 40;
+    /* fall through */
   case  5: k1 ^= (uint64_t)(tail[ 4]) << 32;
+    /* fall through */
   case  4: k1 ^= (uint64_t)(tail[ 3]) << 24;
+    /* fall through */
   case  3: k1 ^= (uint64_t)(tail[ 2]) << 16;
+    /* fall through */
   case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;
+    /* fall through */
   case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
            k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
   };
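Aside: the last three hunks all make the same change, annotating each intentional case fall-through in the tail-processing switches. GCC 7+'s -Wimplicit-fallthrough recognizes a /* fall through */ comment immediately before the next case label (and both GCC and Clang accept the C++17 [[fallthrough]]; attribute) as an explicit marker, so annotated drops compile warning-clean while unannotated ones are flagged. A minimal sketch of both annotation styles, assuming GCC's default comment matching (the function mix_tail and its test values are illustrative, not from the commit):

    #include <cstdint>
    #include <cstdio>

    // Compile with: g++ -std=c++17 -Wimplicit-fallthrough demo.cc
    // Both annotation styles below keep the build warning-clean.
    static uint32_t mix_tail(const uint8_t *tail, int len) {
      uint32_t k1 = 0;
      switch (len & 3) {
      case 3: k1 ^= tail[2] << 16;
        /* fall through */
      case 2: k1 ^= tail[1] << 8;
        [[fallthrough]];
      case 1: k1 ^= tail[0];
      }
      return k1;
    }

    int main() {
      const uint8_t tail[3] = {0xaa, 0xbb, 0xcc};
      printf("%08x\n", mix_tail(tail, 3)); // prints 00ccbbaa
    }

The commit uses the comment form throughout, which also works on compilers predating C++17; the attribute form is shown above only for comparison.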