Skip to content

Commit

Permalink
Merge pull request #30 from Cyan4973/dev
Browse files Browse the repository at this point in the history
Dev
  • Loading branch information
Cyan4973 committed Aug 13, 2015
2 parents c7c4ae5 + fb024c0 commit 23212da
Show file tree
Hide file tree
Showing 5 changed files with 133 additions and 49 deletions.
8 changes: 7 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
# You can contact the author at :
# - xxHash source repository : http://code.google.com/p/xxhash/
# ################################################################
# xxhsum : provides 32/64 bits hash of a file, or piped data
# xxhsum : provides 32/64 bits hash of one or multiple files, or stdin
# ################################################################

CFLAGS ?= -O3
Expand Down Expand Up @@ -51,9 +51,15 @@ xxhsum32: xxhash.c xxhsum.c
$(CC) -m32 $(FLAGS) $^ -o $@$(EXT)

test: clean xxhsum
# stdin
./xxhsum < xxhash.c
# multiple files
./xxhsum *
# internal bench
./xxhsum -bi1
# file bench
./xxhsum -bi1 xxhash.c
# memory tests
valgrind --leak-check=yes --error-exitcode=1 ./xxhsum -bi1 xxhash.c
valgrind --leak-check=yes --error-exitcode=1 ./xxhsum -H0 xxhash.c
valgrind --leak-check=yes --error-exitcode=1 ./xxhsum -H1 xxhash.c
Expand Down
12 changes: 11 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,17 @@ The reference system uses a Core i5-3340M @2.7GHz
| XXH64 | 13.8 GB/s | 1.9 GB/s |
| XXH32 | 6.8 GB/s | 6.0 GB/s |


### License

The library files `xxhash.c` and `xxhash.h` are BSD licensed.
The utility `xxhsum` is GPL licensed.


### Other languages

Beyond the C reference version,
xxHash is also available on many programming languages,
thanks to great contributors.
They are [listed here](https://code.google.com/p/xxhash/).

20 changes: 20 additions & 0 deletions cmake_unofficial/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Unofficial CMake build for xxHash.
# Produces a shared library target "xxhash" and, optionally, a static
# library target "xxhashstatic"; both emit an artifact named "xxhash".
cmake_minimum_required(VERSION 2.6)
cmake_policy(VERSION 2.6)

project(xxhash)

# Library version follows the xxHash release numbering (r41 -> 0.41.0).
set(XXHASH_LIB_VERSION "0.41.0")
set(XXHASH_LIB_SOVERSION "0")

# Optional static library build (enabled by default).
set(BUILD_STATIC_LIBS ON CACHE BOOL "Set to ON to build static libraries")
if(BUILD_STATIC_LIBS)
  # STATIC is stated explicitly so this target stays static even when the
  # user configures with BUILD_SHARED_LIBS=ON (the default library type
  # would otherwise follow that variable).
  add_library(xxhashstatic STATIC ../xxhash.c)
  set_target_properties(xxhashstatic PROPERTIES OUTPUT_NAME xxhash)
endif()

add_library(xxhash SHARED ../xxhash.c)
set_target_properties(xxhash PROPERTIES
  COMPILE_DEFINITIONS "XXHASH_EXPORT"
  VERSION "${XXHASH_LIB_VERSION}"
  SOVERSION "${XXHASH_LIB_SOVERSION}")

60 changes: 41 additions & 19 deletions xxhash.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,15 @@ You can contact the author at :
/**************************************
* Tuning parameters
**************************************/
/* Unaligned memory access is automatically enabled for "common" CPU, such as x86.
* For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
* If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
* You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
/* XXH_FORCE_DIRECT_UNALIGNED_MEMORY_ACCESS
* Unaligned memory access is automatically enabled for "common" CPU, such as x86/x64.
 * For other CPUs, the compiler will be more cautious, and insert extra code to handle unaligned memory accesses safely.
* If you know your target CPU efficiently supports unaligned memory accesses, you can force this option manually.
 * If your CPU efficiently supports unaligned memory accesses and the compiler did not automatically detect it, you will witness a large performance improvement.
* You can also enable this switch from compilation command line / Makefile.
*/
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
# define XXH_USE_UNALIGNED_ACCESS 1
#if !defined(XXH_FORCE_DIRECT_MEMORY_ACCESS) && ( defined(__ARM_FEATURE_UNALIGNED) )
# define XXH_FORCE_DIRECT_MEMORY_ACCESS 1
#endif

/* XXH_ACCEPT_NULL_INPUT_POINTER :
Expand All @@ -61,6 +63,15 @@ You can contact the author at :
*/
#define XXH_FORCE_NATIVE_FORMAT 0

/* XXH_USELESS_ALIGN_BRANCH :
* This is a minor performance trick, only useful with lots of very small keys.
* It means : don't make a test between aligned/unaligned, because performance will be the same.
* It avoids one initial branch per hash.
*/
#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) || defined(XXH_FORCE_DIRECT_MEMORY_ACCESS)
# define XXH_USELESS_ALIGN_BRANCH 1
#endif


/**************************************
* Compiler Specific Options
Expand Down Expand Up @@ -113,20 +124,29 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
typedef unsigned long long U64;
#endif


#if defined(XXH_FORCE_DIRECT_MEMORY_ACCESS)

static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#else

static U32 XXH_read32(const void* memPtr)
{
U32 val32;
memcpy(&val32, memPtr, 4);
return val32;
U32 val;
memcpy(&val, memPtr, sizeof(val));
return val;
}

static U64 XXH_read64(const void* memPtr)
{
U64 val64;
memcpy(&val64, memPtr, 8);
return val64;
U64 val;
memcpy(&val, memPtr, sizeof(val));
return val;
}

#endif // defined


/******************************************
Expand Down Expand Up @@ -175,8 +195,10 @@ static U64 XXH_swap64 (U64 x)
* Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
static const int one = 1;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static const int one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&one))
#endif

Expand Down Expand Up @@ -315,7 +337,7 @@ FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH
}


unsigned XXH32 (const void* input, size_t len, unsigned seed)
unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
Expand All @@ -326,7 +348,7 @@ unsigned XXH32 (const void* input, size_t len, unsigned seed)
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

# if !defined(XXH_USE_UNALIGNED_ACCESS)
# if !defined(XXH_USELESS_ALIGN_BRANCH)
if ((((size_t)input) & 3) == 0) /* Input is 4-bytes aligned, leverage the speed benefit */
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
Expand Down Expand Up @@ -466,7 +488,7 @@ unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

# if !defined(XXH_USE_UNALIGNED_ACCESS)
# if !defined(XXH_USELESS_ALIGN_BRANCH)
if ((((size_t)input) & 7)==0) /* Input is aligned, let's leverage the speed advantage */
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
Expand Down Expand Up @@ -538,7 +560,7 @@ XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)

/*** Hash feed ***/

XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
XXH_errorcode XXH32_reset(XXH32_state_t* state_in, unsigned int seed)
{
XXH_istate32_t* state = (XXH_istate32_t*) state_in;
state->seed = seed;
Expand Down Expand Up @@ -708,7 +730,7 @@ FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endiane
}


U32 XXH32_digest (const XXH32_state_t* state_in)
unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

Expand Down
Loading

0 comments on commit 23212da

Please sign in to comment.