X-Git-Url: https://git.m6w6.name/?a=blobdiff_plain;f=libhashkit%2Fmurmur3.cc;h=8d86cfd859cca7e11d6afa72055572d75d6965e5;hb=cefa03b14574d23cdd2f9db5ff28f210e697042c;hp=5a5666e712f5b14029f6f2dda3bf6d34b4daa80a;hpb=a6b09c592769d07c446a627e7d0f8f42e1d25aac;p=awesomized%2Flibmemcached

diff --git a/libhashkit/murmur3.cc b/libhashkit/murmur3.cc
index 5a5666e7..8d86cfd8 100644
--- a/libhashkit/murmur3.cc
+++ b/libhashkit/murmur3.cc
@@ -7,7 +7,7 @@
 // compile and run any of them on any platform, but your performance with the
 // non-native version will be less than optimal.
 
-#include "mem_config.h"
+#include "libhashkit/hashkitcon.h"
 
 #include "libhashkit/murmur3.h"
 
@@ -39,7 +39,13 @@ static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-#define getblock(p, i) (p[i])
+#include <string.h>
+template <typename T>
+static inline T getblock(const T *blocks, int i) {
+  T b;
+  memcpy(&b, ((const uint8_t *) blocks) + i * sizeof(T), sizeof(T));
+  return b;
+}
 
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
@@ -109,8 +115,8 @@ void MurmurHash3_x86_32 ( const void * key, int len,
 
   switch(len & 3)
   {
-  case 3: k1 ^= tail[2] << 16;
-  case 2: k1 ^= tail[1] << 8;
+  case 3: k1 ^= tail[2] << 16; /* fall through */
+  case 2: k1 ^= tail[1] << 8; /* fall through */
   case 1: k1 ^= tail[0];
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
   };
@@ -185,27 +191,27 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
 
   switch(len & 15)
   {
-  case 15: k4 ^= tail[14] << 16;
-  case 14: k4 ^= tail[13] << 8;
+  case 15: k4 ^= tail[14] << 16; /* fall through */
+  case 14: k4 ^= tail[13] << 8; /* fall through */
   case 13: k4 ^= tail[12] << 0;
            k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
-
-  case 12: k3 ^= tail[11] << 24;
-  case 11: k3 ^= tail[10] << 16;
-  case 10: k3 ^= tail[ 9] << 8;
-  case  9: k3 ^= tail[ 8] << 0;
+           /* fall through */
+  case 12: k3 ^= tail[11] << 24; /* fall through */
+  case 11: k3 ^= tail[10] << 16; /* fall through */
+  case 10: k3 ^= tail[ 9] << 8; /* fall through */
+  case  9: k3 ^= tail[ 8] << 0; /* fall through */
            k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
-
-  case  8: k2 ^= tail[ 7] << 24;
-  case  7: k2 ^= tail[ 6] << 16;
-  case  6: k2 ^= tail[ 5] << 8;
-  case  5: k2 ^= tail[ 4] << 0;
+           /* fall through */
+  case  8: k2 ^= tail[ 7] << 24; /* fall through */
+  case  7: k2 ^= tail[ 6] << 16; /* fall through */
+  case  6: k2 ^= tail[ 5] << 8; /* fall through */
+  case  5: k2 ^= tail[ 4] << 0; /* fall through */
            k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
-
-  case  4: k1 ^= tail[ 3] << 24;
-  case  3: k1 ^= tail[ 2] << 16;
-  case  2: k1 ^= tail[ 1] << 8;
-  case  1: k1 ^= tail[ 0] << 0;
+           /* fall through */
+  case  4: k1 ^= tail[ 3] << 24; /* fall through */
+  case  3: k1 ^= tail[ 2] << 16; /* fall through */
+  case  2: k1 ^= tail[ 1] << 8; /* fall through */
+  case  1: k1 ^= tail[ 0] << 0; /* fall through */
            k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
   };
@@ -275,23 +281,23 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
 
   switch(len & 15)
   {
-  case 15: k2 ^= (uint64_t)(tail[14]) << 48;
-  case 14: k2 ^= (uint64_t)(tail[13]) << 40;
-  case 13: k2 ^= (uint64_t)(tail[12]) << 32;
-  case 12: k2 ^= (uint64_t)(tail[11]) << 24;
-  case 11: k2 ^= (uint64_t)(tail[10]) << 16;
-  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
-  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
+  case 15: k2 ^= (uint64_t)(tail[14]) << 48; /* fall through */
+  case 14: k2 ^= (uint64_t)(tail[13]) << 40; /* fall through */
+  case 13: k2 ^= (uint64_t)(tail[12]) << 32; /* fall through */
+  case 12: k2 ^= (uint64_t)(tail[11]) << 24; /* fall through */
+  case 11: k2 ^= (uint64_t)(tail[10]) << 16; /* fall through */
+  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8; /* fall through */
+  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0; /* fall through */
            k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
-
-  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56;
-  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48;
-  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40;
-  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32;
-  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24;
-  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16;
-  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;
-  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
+           /* fall through */
+  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56; /* fall through */
+  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48; /* fall through */
+  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40; /* fall through */
+  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32; /* fall through */
+  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24; /* fall through */
+  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16; /* fall through */
+  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8; /* fall through */
+  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0; /* fall through */
            k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
   };
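
The substantive change is the @@ -39,7 +39,13 @@ hunk: the raw #define getblock(p, i) (p[i]) macro becomes a memcpy-based template, so block reads no longer dereference potentially misaligned pointers; the remaining hunks only annotate the intentional switch fall-throughs. A minimal standalone sketch of the block-read technique follows; it is not part of the commit, and the buffer contents, offsets, and main() are illustrative only.

    // Sketch of the memcpy-based block read introduced above, assuming a
    // C++ compiler; the misaligned buffer and printed values are made up
    // for illustration and are not taken from the library's test suite.
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    template <typename T>
    static inline T getblock(const T *blocks, int i) {
      T b;
      // Copy the i-th block byte-wise instead of dereferencing blocks[i],
      // which keeps the read well-defined even if blocks is misaligned.
      memcpy(&b, ((const uint8_t *) blocks) + i * sizeof(T), sizeof(T));
      return b;
    }

    int main(void) {
      uint8_t raw[9] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
      // raw + 1 is not 4-byte aligned; this mirrors how the hash functions
      // form their blocks pointer from an arbitrary key buffer.
      const uint32_t *blocks = (const uint32_t *)(raw + 1);
      printf("block 0 = %08x\n", (unsigned) getblock(blocks, 0));
      printf("block 1 = %08x\n", (unsigned) getblock(blocks, 1));
      return 0;
    }

With the old macro, the same call pattern performed an unaligned uint32_t load, which is undefined behavior and faults on strict-alignment targets; compilers typically optimize the memcpy back into a single load where the platform allows it.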