From cefa03b14574d23cdd2f9db5ff28f210e697042c Mon Sep 17 00:00:00 2001
From: Michael Wallner
Date: Tue, 14 Jan 2020 13:10:48 +0100
Subject: [PATCH] libhashkit: fix UB on unaligned access

optimizations will produce similar instructions anyway
---
 libhashkit/murmur.cc  | 5 ++++-
 libhashkit/murmur3.cc | 8 +++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/libhashkit/murmur.cc b/libhashkit/murmur.cc
index 3bdacf0e..f3e8fe6a 100644
--- a/libhashkit/murmur.cc
+++ b/libhashkit/murmur.cc
@@ -56,6 +56,8 @@
 
 #ifdef HAVE_MURMUR_HASH
 
+#include <string.h>
+
 uint32_t hashkit_murmur(const char *key, size_t length, void *context)
 {
   /*
@@ -79,7 +81,8 @@ uint32_t hashkit_murmur(const char *key, size_t length, void *context)
 
   while(length >= 4)
   {
-    unsigned int k = *(unsigned int *)data;
+    unsigned int k;
+    memcpy(&k, data, sizeof(unsigned int));
 
     k *= m;
     k ^= k >> r;
diff --git a/libhashkit/murmur3.cc b/libhashkit/murmur3.cc
index 6e2f8ed8..8d86cfd8 100644
--- a/libhashkit/murmur3.cc
+++ b/libhashkit/murmur3.cc
@@ -39,7 +39,13 @@ static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-#define getblock(p, i) (p[i])
+#include <string.h>
+template <typename T>
+static inline T getblock(const T *blocks, int i) {
+  T b;
+  memcpy(&b, ((const uint8_t *) blocks) + i * sizeof(T), sizeof(T));
+  return b;
+}
 
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
-- 
2.30.2
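
Below the signature delimiter (so git-am ignores it): a minimal standalone
sketch, not part of the patch, of the technique the diff applies, reading a
block through memcpy instead of a cast pointer. The helper name load_u32 and
the test values are illustrative assumptions, not code from the patch.
Dereferencing *(const uint32_t *)p is undefined behavior when p is not
suitably aligned; memcpy into a local variable is always defined, and, as the
commit message notes, optimizing compilers produce similar instructions for
both (a single load on architectures that allow unaligned access).

#include <cstdint>
#include <cstdio>
#include <cstring>

/* Well-defined replacement for the UB pattern *(const uint32_t *)p. */
static inline uint32_t load_u32(const void *p) {
  uint32_t v;
  memcpy(&v, p, sizeof v); /* defined for any alignment of p */
  return v;
}

int main() {
  /* Read at buf + 1 to force a misaligned source address. */
  unsigned char buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
  /* Prints 0x12345678 on a little-endian host. */
  printf("0x%08x\n", (unsigned) load_u32(buf + 1));
  return 0;
}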