From: Michael Wallner Date: Tue, 14 Jan 2020 12:10:48 +0000 (+0100) Subject: libhashkit: fix UB on unaligned access X-Git-Tag: pre_cmake~44 X-Git-Url: https://git.m6w6.name/?a=commitdiff_plain;h=cefa03b14574d23cdd2f9db5ff28f210e697042c;p=m6w6%2Flibmemcached libhashkit: fix UB on unaligned access optimizations will produce similar instructions anyway --- diff --git a/libhashkit/murmur.cc b/libhashkit/murmur.cc index 3bdacf0e..f3e8fe6a 100644 --- a/libhashkit/murmur.cc +++ b/libhashkit/murmur.cc @@ -56,6 +56,8 @@ #ifdef HAVE_MURMUR_HASH +#include <string.h> + uint32_t hashkit_murmur(const char *key, size_t length, void *context) { /* @@ -79,7 +81,8 @@ uint32_t hashkit_murmur(const char *key, size_t length, void *context) while(length >= 4) { - unsigned int k = *(unsigned int *)data; + unsigned int k; + memcpy(&k, data, sizeof(unsigned int)); k *= m; k ^= k >> r; diff --git a/libhashkit/murmur3.cc b/libhashkit/murmur3.cc index 6e2f8ed8..8d86cfd8 100644 --- a/libhashkit/murmur3.cc +++ b/libhashkit/murmur3.cc @@ -39,7 +39,13 @@ static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r ) // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here -#define getblock(p, i) (p[i]) +#include <string.h> +template <typename T> +static inline T getblock(const T *blocks, int i) { + T b; + memcpy(&b, ((const uint8_t *) blocks) + i * sizeof(T), sizeof(T)); + return b; +} //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche