DOSBox-X
src/libs/decoders/xxhash.h
00001 /*
00002  * xxHash - Extremely Fast Hash algorithm
00003  * Header File
00004  * Copyright (C) 2012-2020 Yann Collet
00005  *
00006  * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
00007  *
00008  * Redistribution and use in source and binary forms, with or without
00009  * modification, are permitted provided that the following conditions are
00010  * met:
00011  *
00012  *    * Redistributions of source code must retain the above copyright
00013  *      notice, this list of conditions and the following disclaimer.
00014  *    * Redistributions in binary form must reproduce the above
00015  *      copyright notice, this list of conditions and the following disclaimer
00016  *      in the documentation and/or other materials provided with the
00017  *      distribution.
00018  *
00019  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00020  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00021  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
00022  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
00023  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00024  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
00025  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00026  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00027  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00028  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
00029  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00030  *
00031  * You can contact the author at:
00032  *   - xxHash homepage: https://www.xxhash.com
00033  *   - xxHash source repository: https://github.com/Cyan4973/xxHash
00034  */
00035 
00036 /* TODO: update */
00037 /* Notice extracted from xxHash homepage:
00038 
00039 xxHash is an extremely fast hash algorithm, running at RAM speed limits.
00040 It also successfully passes all tests from the SMHasher suite.
00041 
00042 Comparison (single thread, 32-bit Windows 7, using SMHasher on a Core 2 Duo @3GHz)
00043 
00044 Name            Speed       Q.Score   Author
00045 xxHash          5.4 GB/s     10
00046 CrapWow         3.2 GB/s      2       Andrew
00047 MurmurHash 3a   2.7 GB/s     10       Austin Appleby
00048 SpookyHash      2.0 GB/s     10       Bob Jenkins
00049 SBox            1.4 GB/s      9       Bret Mulvey
00050 Lookup3         1.2 GB/s      9       Bob Jenkins
00051 SuperFastHash   1.2 GB/s      1       Paul Hsieh
00052 CityHash64      1.05 GB/s    10       Pike & Alakuijala
00053 FNV             0.55 GB/s     5       Fowler, Noll, Vo
00054 CRC32           0.43 GB/s     9
00055 MD5-32          0.33 GB/s    10       Ronald L. Rivest
00056 SHA1-32         0.28 GB/s    10
00057 
00058 Q.Score is a measure of quality of the hash function.
00059 It depends on successfully passing the SMHasher test set.
00060 10 is a perfect score.
00061 
00062 Note: SMHasher's CRC32 implementation is not the fastest one.
00063 Other speed-oriented implementations can be faster,
00064 especially in combination with PCLMUL instruction:
00065 https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735
00066 
00067 A 64-bit version, named XXH64, is available since r35.
00068 It offers much better speed, but for 64-bit applications only.
00069 Name     Speed on 64 bits    Speed on 32 bits
00070 XXH64       13.8 GB/s            1.9 GB/s
00071 XXH32        6.8 GB/s            6.0 GB/s
00072 */
00073 
00074 #if defined (__cplusplus)
00075 extern "C" {
00076 #endif
00077 
00078 /* ****************************
00079  *  INLINE mode
00080  ******************************/
00097 #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
00098     && !defined(XXH_INLINE_ALL_31684351384)
00099    /* this section should be traversed only once */
00100 #  define XXH_INLINE_ALL_31684351384
00101    /* give access to the advanced API, required to compile implementations */
00102 #  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
00103 #  define XXH_STATIC_LINKING_ONLY
00104    /* make all functions private */
00105 #  undef XXH_PUBLIC_API
00106 #  if defined(__GNUC__)
00107 #    define XXH_PUBLIC_API static __inline __attribute__((unused))
00108 #  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
00109 #    define XXH_PUBLIC_API static inline
00110 #  elif defined(_MSC_VER)
00111 #    define XXH_PUBLIC_API static __inline
00112 #  else
00113      /* note: this version may generate warnings for unused static functions */
00114 #    define XXH_PUBLIC_API static
00115 #  endif
00116 
00117    /*
00118     * This part deals with the special case where a unit wants to inline xxHash,
00119     * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such
00120     * as part of some previously included *.h header file.
00121     * Without further action, the new include would just be ignored,
00122     * and functions would effectively _not_ be inlined (silent failure).
00123     * The following macros solve this situation by prefixing all inlined names,
00124     * avoiding naming collision with previous inclusions.
00125     */
00126 #  ifdef XXH_NAMESPACE
00127 #    error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported"
00128      /*
00129       * Note: Alternative: #undef all symbols (it's a pretty large list).
00130       * Without #error: it compiles, but functions are actually not inlined.
00131       */
00132 #  endif
00133 #  define XXH_NAMESPACE XXH_INLINE_
00134    /*
00135     * Some identifiers (enums, type names) are not symbols, but they must
00136     * still be renamed to avoid redeclaration.
00137     * Alternative solution: do not redeclare them.
00138     * However, this requires some #ifdefs, and is a more dispersed action.
00139     * Meanwhile, renaming can be achieved in a single block
00140     */
00141 #  define XXH_IPREF(Id)   XXH_INLINE_ ## Id
00142 #  define XXH_OK XXH_IPREF(XXH_OK)
00143 #  define XXH_ERROR XXH_IPREF(XXH_ERROR)
00144 #  define XXH_errorcode XXH_IPREF(XXH_errorcode)
00145 #  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
00146 #  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
00147 #  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
00148 #  define XXH32_state_s XXH_IPREF(XXH32_state_s)
00149 #  define XXH32_state_t XXH_IPREF(XXH32_state_t)
00150 #  define XXH64_state_s XXH_IPREF(XXH64_state_s)
00151 #  define XXH64_state_t XXH_IPREF(XXH64_state_t)
00152 #  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
00153 #  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
00154 #  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
00155    /* Ensure the header is parsed again, even if it was previously included */
00156 #  undef XXHASH_H_5627135585666179
00157 #  undef XXHASH_H_STATIC_13879238742
00158 #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
00159 
00160 
00161 
00162 /* ****************************************************************
00163  *  Stable API
00164  *****************************************************************/
00165 #ifndef XXHASH_H_5627135585666179
00166 #define XXHASH_H_5627135585666179 1
00167 
00168 /* specific declaration modes for Windows */
00169 #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
00170 #  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
00171 #    ifdef XXH_EXPORT
00172 #      define XXH_PUBLIC_API __declspec(dllexport)
00173 #    elif defined(XXH_IMPORT)
00174 #      define XXH_PUBLIC_API __declspec(dllimport)
00175 #    endif
00176 #  else
00177 #    define XXH_PUBLIC_API   /* do nothing */
00178 #  endif
00179 #endif
00180 
00194 #ifdef XXH_NAMESPACE
00195 #  define XXH_CAT(A,B) A##B
00196 #  define XXH_NAME2(A,B) XXH_CAT(A,B)
00197 #  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
00198 #  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
00199 #  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
00200 #  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
00201 #  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
00202 #  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
00203 #  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
00204 #  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
00205 #  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
00206 #  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
00207 #  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
00208 #  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
00209 #  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
00210 #  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
00211 #  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
00212 #  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
00213 #  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
00214 #  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
00215 #  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
00216 #endif
00217 
00218 
00219 /* *************************************
00220 *  Version
00221 ***************************************/
00222 #define XXH_VERSION_MAJOR    0
00223 #define XXH_VERSION_MINOR    7
00224 #define XXH_VERSION_RELEASE  4
00225 #define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
00226 XXH_PUBLIC_API unsigned XXH_versionNumber (void);
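/*
 * A minimal sanity-check sketch (illustrative only, not part of the API beyond
 * XXH_versionNumber() itself): with the macros above, v0.7.4 yields 704, so a
 * caller can verify that the header and the linked library agree:
 *
 *   if (XXH_versionNumber() != XXH_VERSION_NUMBER) {
 *       // header and linked library versions disagree; handle the mismatch
 *   }
 */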
00227 
00228 
00229 /* ****************************
00230 *  Definitions
00231 ******************************/
00232 #include <stddef.h>   /* size_t */
00233 typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
00234 
00235 
00236 /*-**********************************************************************
00237 *  32-bit hash
00238 ************************************************************************/
00239 #if !defined (__VMS) \
00240   && (defined (__cplusplus) \
00241   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
00242 #   include <stdint.h>
00243     typedef uint32_t XXH32_hash_t;
00244 #else
00245 #   include <limits.h>
00246 #   if UINT_MAX == 0xFFFFFFFFUL
00247       typedef unsigned int XXH32_hash_t;
00248 #   else
00249 #     if ULONG_MAX == 0xFFFFFFFFUL
00250         typedef unsigned long XXH32_hash_t;
00251 #     else
00252 #       error "unsupported platform: need a 32-bit type"
00253 #     endif
00254 #   endif
00255 #endif
00256 
00268 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
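/*
 * A minimal one-shot sketch (illustrative only; the message and seed are
 * arbitrary):
 *
 *   const char msg[] = "sample";
 *   XXH32_hash_t const h = XXH32(msg, sizeof(msg)-1, 0);  // hash 6 bytes, seed 0
 */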
00269 
00270 /*******   Streaming   *******/
00271 
00272 /*
00273  * Streaming functions generate the xxHash value from an incremental input.
00274  * This method is slower than single-call functions, due to state management.
00275  * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
00276  *
00277  * An XXH state must first be allocated using `XXH*_createState()`.
00278  *
00279  * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
00280  *
00281  * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
00282  *
00283  * `XXH*_update()` returns an error code, with 0 (`XXH_OK`) meaning success,
00284  * and any other value meaning failure.
00285  *
00286  * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
00287  * This function returns the nn-bit hash value as an int or long long.
00288  *
00289  * It's still possible to continue inserting input into the hash state after a
00290  * digest, and generate new hash values later on by invoking `XXH*_digest()`.
00291  *
00292  * When done, release the state using `XXH*_freeState()`.
00293  */
00294 
00295 typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
00296 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
00297 XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
00298 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
00299 
00300 XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);
00301 XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
00302 XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
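/*
 * A minimal streaming sketch following the steps described above
 * (illustrative only; read_more() is a hypothetical incremental data source):
 *
 *   XXH32_state_t* const state = XXH32_createState();
 *   if (state != NULL && XXH32_reset(state, 0) == XXH_OK) {
 *       char buffer[4096];
 *       long count;
 *       while ((count = read_more(buffer, sizeof(buffer))) > 0) {
 *           if (XXH32_update(state, buffer, (size_t)count) != XXH_OK) break;
 *       }
 *       {   XXH32_hash_t const hash = XXH32_digest(state);
 *           (void)hash;   // use the hash value
 *       }
 *   }
 *   (void)XXH32_freeState(state);   // safe even if state is NULL
 */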
00303 
00304 /*******   Canonical representation   *******/
00305 
00306 /*
00307  * The default return values from XXH functions are unsigned 32 and 64 bit
00308  * integers.
00309  * This is the simplest and fastest format for further post-processing.
00310  *
00311  * However, this leaves open the question of byte-level ordering, since
00312  * little-endian and big-endian conventions store the same number differently.
00313  *
00314  * The canonical representation settles this issue by mandating big-endian
00315  * convention, the same convention as human-readable numbers (large digits first).
00316  *
00317  * When writing hash values to storage, sending them over a network, or printing
00318  * them, it's highly recommended to use the canonical representation to ensure
00319  * portability across a wider range of systems, present and future.
00320  *
00321  * The following functions allow transformation of hash values to and from
00322  * canonical format.
00323  */
00324 
00325 typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
00326 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
00327 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
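/*
 * A short round-trip sketch (illustrative only; `h` is a previously computed
 * hash and `f` a hypothetical open FILE*):
 *
 *   XXH32_canonical_t canon;
 *   XXH32_canonicalFromHash(&canon, h);   // canon.digest now holds big-endian bytes
 *   fwrite(canon.digest, 1, sizeof(canon.digest), f);   // portable on-disk form
 *   // ... later, after reading the 4 bytes back into canon:
 *   XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canon);   // h2 == h
 */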
00328 
00329 
00330 #ifndef XXH_NO_LONG_LONG
00331 /*-**********************************************************************
00332 *  64-bit hash
00333 ************************************************************************/
00334 #if !defined (__VMS) \
00335   && (defined (__cplusplus) \
00336   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
00337 #   include <stdint.h>
00338     typedef uint64_t XXH64_hash_t;
00339 #else
00340     /* the following type must have a width of 64 bits */
00341     typedef unsigned long long XXH64_hash_t;
00342 #endif
00343 
00357 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed);
00358 
00359 /*******   Streaming   *******/
00360 typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
00361 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
00362 XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
00363 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
00364 
00365 XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
00366 XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
00367 XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
00368 
00369 /*******   Canonical representation   *******/
00370 typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
00371 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
00372 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
00373 
00374 
00375 #endif  /* XXH_NO_LONG_LONG */
00376 
00377 #endif /* XXHASH_H_5627135585666179 */
00378 
00379 
00380 
00381 #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
00382 #define XXHASH_H_STATIC_13879238742
00383 /* ****************************************************************************
00384  * This section contains declarations which are not guaranteed to remain stable.
00385  * They may change in future versions, becoming incompatible with a different
00386  * version of the library.
00387  * These declarations should only be used with static linking.
00388  * Never use them in association with dynamic linking!
00389  ***************************************************************************** */
00390 
00391 /*
00392  * These definitions are only present to allow static allocation of an XXH
00393  * state, for example, on the stack or in a struct.
00394  * Never **ever** access members directly.
00395  */
00396 
00397 struct XXH32_state_s {
00398    XXH32_hash_t total_len_32;
00399    XXH32_hash_t large_len;
00400    XXH32_hash_t v1;
00401    XXH32_hash_t v2;
00402    XXH32_hash_t v3;
00403    XXH32_hash_t v4;
00404    XXH32_hash_t mem32[4];
00405    XXH32_hash_t memsize;
00406    XXH32_hash_t reserved;   /* never read nor written; might be removed in a future version */
00407 };   /* typedef'd to XXH32_state_t */
00408 
00409 
00410 #ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
00411 
00412 struct XXH64_state_s {
00413    XXH64_hash_t total_len;
00414    XXH64_hash_t v1;
00415    XXH64_hash_t v2;
00416    XXH64_hash_t v3;
00417    XXH64_hash_t v4;
00418    XXH64_hash_t mem64[4];
00419    XXH32_hash_t memsize;
00420    XXH32_hash_t reserved32;  /* required for padding anyway */
00421    XXH64_hash_t reserved64;  /* never read nor written; might be removed in a future version */
00422 };   /* typedef'd to XXH64_state_t */
00423 
00424 
00425 /*-**********************************************************************
00426 *  XXH3
00427 *  New experimental hash
00428 ************************************************************************/
00429 
00430 /* ************************************************************************
00431  * XXH3 is a new hash algorithm featuring:
00432  *  - Improved speed for both small and large inputs
00433  *  - True 64-bit and 128-bit outputs
00434  *  - SIMD acceleration
00435  *  - Improved 32-bit viability
00436  *
00437  * Speed analysis methodology is explained here:
00438  *
00439  *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
00440  *
00441  * In general, expect XXH3 to run about ~2x faster on large inputs and >3x
00442  * faster on small ones compared to XXH64, though exact differences depend on
00443  * the platform.
00444  *
00445  * The algorithm is portable: Like XXH32 and XXH64, it generates the same hash
00446  * on all platforms.
00447  *
00448  * It benefits greatly from SIMD and 64-bit arithmetic, but does not require either.
00449  *
00450  * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run
00451  * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are
00452  * explained in the implementation.
00453  *
00454  * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
00455  * ZVector and scalar targets. This can be controlled with the XXH_VECTOR macro.
00456  *
00457  * XXH3 offers 2 variants, _64bits and _128bits.
00458  * When only 64 bits are needed, prefer calling the _64bits variant, as it
00459  * reduces the amount of mixing, resulting in faster speed on small inputs.
00460  *
00461  * It's also generally simpler to manipulate a scalar return type than a struct.
00462  *
00463  * The 128-bit version adds additional strength, but it is slightly slower.
00464  *
00465  * The XXH3 algorithm is still in development.
00466  * The results it produces may still change in future versions.
00467  *
00468  * Results produced by v0.7.x are not comparable with results from v0.7.y.
00469  * However, the API is completely stable, and it can safely be used for
00470  * ephemeral data (local sessions).
00471  *
00472  * Avoid storing values in long-term storage until the algorithm is finalized.
00473  *
00474  * Since v0.7.3, XXH3 has reached "release candidate" status, meaning that, if
00475  * everything remains fine, its current format will be "frozen" and become the
00476  * final one.
00477  *
00478  * At that point, the return values of XXH3 and XXH128 will no longer change in
00479  * future versions.
00480  *
00481  * XXH3's return values will be officially finalized upon reaching v0.8.0.
00482  *
00483  * The API supports one-shot hashing, streaming mode, and custom secrets.
00484  */
00485 
00486 #ifdef XXH_NAMESPACE
00487 #  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
00488 #  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
00489 #  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
00490 
00491 #  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
00492 #  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
00493 #  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
00494 
00495 #  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
00496 #  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
00497 #  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
00498 #  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
00499 #  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
00500 #endif
00501 
00502 /* XXH3_64bits():
00503  * default 64-bit variant, using default secret and default seed of 0.
00504  * It's the fastest variant. */
00505 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);
00506 
00507 /*
00508  * XXH3_64bits_withSecret():
00509  * It's possible to provide any blob of bytes as a "secret" to generate the hash.
00510  * This makes it more difficult for an external actor to prepare an intentional
00511  * collision.
00512  * The secret *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
00513  * It should consist of random bytes.
00514  * Avoid trivial sequences, such as repeated bytes and especially runs of '\0',
00515  * as these can cancel themselves out.
00516  * Failure to respect these conditions will result in a poor quality hash.
00517  */
00518 #define XXH3_SECRET_SIZE_MIN 136
00519 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
00520 
00521 /*
00522  * XXH3_64bits_withSeed():
00523  * This variant generates a custom secret on the fly based on the default
00524  * secret, altered using the `seed` value.
00525  * While this operation is decently fast, note that it's not completely free.
00526  * Note: seed==0 produces the same results as XXH3_64bits().
00527  */
00528 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
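/*
 * Sketch of the three one-shot variants above (illustrative only; data, len,
 * seed, secret and secretSize are hypothetical, with secretSize >=
 * XXH3_SECRET_SIZE_MIN and secret filled with random bytes):
 *
 *   XXH64_hash_t const h1 = XXH3_64bits(data, len);                 // default
 *   XXH64_hash_t const h2 = XXH3_64bits_withSeed(data, len, seed);  // seeded
 *   XXH64_hash_t const h3 = XXH3_64bits_withSecret(data, len, secret, secretSize);
 */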
00529 
00530 
00531 /* streaming 64-bit */
00532 
00533 #if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11+ */
00534 #  include <stdalign.h>
00535 #  define XXH_ALIGN(n)      alignas(n)
00536 #elif defined(__GNUC__)
00537 #  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
00538 #elif defined(_MSC_VER)
00539 #  define XXH_ALIGN(n)      __declspec(align(n))
00540 #else
00541 #  define XXH_ALIGN(n)   /* disabled */
00542 #endif
00543 
00544 /* Old GCC versions only accept the attribute after the type in structures. */
00545 #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
00546     && defined(__GNUC__)
00547 #   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
00548 #else
00549 #   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
00550 #endif
00551 
00552 typedef struct XXH3_state_s XXH3_state_t;
00553 
00554 #define XXH3_SECRET_DEFAULT_SIZE 192   /* >= XXH3_SECRET_SIZE_MIN */
00555 #define XXH3_INTERNALBUFFER_SIZE 256
00556 struct XXH3_state_s {
00557    XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
00558   /* used to store a custom secret generated from a seed */
00559    XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
00560    XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
00561    XXH32_hash_t bufferedSize;
00562    XXH32_hash_t reserved32;
00563    size_t nbStripesPerBlock;
00564    size_t nbStripesSoFar;
00565    size_t secretLimit;
00566    XXH64_hash_t totalLen;
00567    XXH64_hash_t seed;
00568    XXH64_hash_t reserved64;
00569    const unsigned char* extSecret;  /* reference to external secret;
00570                                      * if == NULL, use .customSecret instead */
00571    /* note: there may be some padding at the end due to alignment on 64 bytes */
00572 }; /* typedef'd to XXH3_state_t */
00573 
00574 #undef XXH_ALIGN_MEMBER
00575 
00576 /*
00577  * Streaming requires state maintenance.
00578  * This operation costs memory and CPU.
00579  * As a consequence, streaming is slower than one-shot hashing.
00580  * For better performance, prefer one-shot functions whenever possible.
00581  */
00582 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
00583 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
00584 XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);
00585 
00586 
00587 /*
00588  * XXH3_64bits_reset():
00589  * Initialize with the default parameters.
00590  * The result will be equivalent to `XXH3_64bits()`.
00591  */
00592 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
00593 /*
00594  * XXH3_64bits_reset_withSeed():
00595  * Generate a custom secret from `seed`, and store it into `statePtr`.
00596  * The digest will be equivalent to `XXH3_64bits_withSeed()`.
00597  */
00598 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
00599 /*
00600  * XXH3_64bits_reset_withSecret():
00601  * `secret` is referenced, and must outlive the hash streaming session, so
00602  * be careful when using stack arrays.
00603  * `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`.
00604  */
00605 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
00606 
00607 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
00608 XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);
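/*
 * A minimal XXH3 streaming sketch (illustrative only; the two chunks and their
 * sizes are hypothetical):
 *
 *   XXH3_state_t* const state = XXH3_createState();
 *   if (state != NULL && XXH3_64bits_reset(state) == XXH_OK) {
 *       (void)XXH3_64bits_update(state, chunk1, chunk1Size);
 *       (void)XXH3_64bits_update(state, chunk2, chunk2Size);
 *       // equals XXH3_64bits() over the concatenated input:
 *       {   XXH64_hash_t const h = XXH3_64bits_digest(state);
 *           (void)h;
 *       }
 *   }
 *   (void)XXH3_freeState(state);
 */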
00609 
00610 
00611 /* 128-bit */
00612 
00613 #ifdef XXH_NAMESPACE
00614 #  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
00615 #  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
00616 #  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
00617 #  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
00618 
00619 #  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
00620 #  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
00621 #  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
00622 #  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
00623 #  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
00624 
00625 #  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
00626 #  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
00627 #  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
00628 #  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
00629 #endif
00630 
00631 typedef struct {
00632     XXH64_hash_t low64;
00633     XXH64_hash_t high64;
00634 } XXH128_hash_t;
00635 
00636 XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
00637 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
00638 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);  /* == XXH128() */
00639 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
00640 
00641 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
00642 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
00643 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
00644 
00645 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
00646 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
00647 
00648 
00649 /* Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
00650 
00655 XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
00656 
00666 XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
00667 
00668 
00669 /*******   Canonical representation   *******/
00670 typedef struct { unsigned char digest[16]; } XXH128_canonical_t;
00671 XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
00672 XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
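/*
 * A short 128-bit sketch (illustrative only; data, len and seed are
 * hypothetical). Since XXH128_hash_t is a struct, compare results with
 * XXH128_isEqual() or XXH128_cmp() rather than ==, which C does not define
 * for struct types:
 *
 *   XXH128_hash_t const a = XXH128(data, len, seed);
 *   XXH128_hash_t const b = XXH3_128bits_withSeed(data, len, seed);
 *   assert(XXH128_isEqual(a, b));   // same variant, hence same result
 */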
00673 
00674 
00675 #endif  /* XXH_NO_LONG_LONG */
00676 
00677 #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
00678 #  define XXH_IMPLEMENTATION
00679 #endif
00680 
00681 #endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
00682 
00683 
00684 /* ======================================================================== */
00685 /* ======================================================================== */
00686 /* ======================================================================== */
00687 
00688 
00689 /*-**********************************************************************
00690  * xxHash implementation
00691  *-**********************************************************************
00692  * xxHash's implementation used to be found in xxhash.c.
00693  *
00694  * However, code inlining requires the implementation to be visible to the
00695  * compiler, usually within the header.
00696  *
00697  * As a workaround, xxhash.c used to be included within xxhash.h. This caused
00698  * some issues with some build systems, especially ones which treat .c files
00699  * as source files.
00700  *
00701  * Therefore, the implementation is now directly integrated within xxhash.h.
00702  * Another small advantage is that xxhash.c is no longer needed in /include.
00703  ************************************************************************/
00704 
00705 #if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
00706    || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
00707 #  define XXH_IMPLEM_13a8737387
00708 
00709 /* *************************************
00710 *  Tuning parameters
00711 ***************************************/
00743 #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
00744 #  if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6)
00745 #    define XXH_FORCE_MEMORY_ACCESS 2
00746 #  elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
00747   (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)))
00748 #    define XXH_FORCE_MEMORY_ACCESS 1
00749 #  endif
00750 #endif
00751 
00759 #ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
00760 #  define XXH_ACCEPT_NULL_INPUT_POINTER 0
00761 #endif
00762 
00773 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
00774 #  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
00775 #    define XXH_FORCE_ALIGN_CHECK 0
00776 #  else
00777 #    define XXH_FORCE_ALIGN_CHECK 1
00778 #  endif
00779 #endif
00780 
00800 #ifndef XXH_NO_INLINE_HINTS
00801 #  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
00802    || defined(__NO_INLINE__)     /* -O0, -fno-inline */
00803 #    define XXH_NO_INLINE_HINTS 1
00804 #  else
00805 #    define XXH_NO_INLINE_HINTS 0
00806 #  endif
00807 #endif
00808 
00816 #ifndef XXH_REROLL
00817 #  if defined(__OPTIMIZE_SIZE__)
00818 #    define XXH_REROLL 1
00819 #  else
00820 #    define XXH_REROLL 0
00821 #  endif
00822 #endif
00823 
00824 
00825 /* *************************************
00826 *  Includes & Memory related functions
00827 ***************************************/
00832 #include <stdlib.h>
00833 
00834 static void* XXH_malloc(size_t s) { return malloc(s); }
00835 static void XXH_free(void* p) { free(p); }
00836 
00838 #include <string.h>
00839 static void* XXH_memcpy(void* dest, const void* src, size_t size)
00840 {
00841     return memcpy(dest,src,size);
00842 }
00843 
00844 #include <limits.h>   /* ULLONG_MAX */
00845 
00846 
00847 /* *************************************
00848 *  Compiler Specific Options
00849 ***************************************/
00850 #ifdef _MSC_VER /* Visual Studio warning fix */
00851 #  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
00852 #endif
00853 
00854 #if XXH_NO_INLINE_HINTS /* disable inlining hints */
00855 #  define XXH_FORCE_INLINE static
00856 #  define XXH_NO_INLINE static
00857 #elif defined(_MSC_VER)    /* Visual Studio */
00858 #  define XXH_FORCE_INLINE static __forceinline
00859 #  define XXH_NO_INLINE static __declspec(noinline)
00860 #else
00861 #  if defined (__cplusplus) \
00862     || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
00863 #    ifdef __GNUC__
00864 #      define XXH_FORCE_INLINE static inline __attribute__((always_inline))
00865 #      define XXH_NO_INLINE static __attribute__((noinline))
00866 #    else
00867 #      define XXH_FORCE_INLINE static inline
00868 #      define XXH_NO_INLINE static
00869 #    endif
00870 #  else
00871 #    define XXH_FORCE_INLINE static
00872 #    define XXH_NO_INLINE static
00873 #  endif /* __STDC_VERSION__ */
00874 #endif
00875 
00876 
00877 
00878 /* *************************************
00879 *  Debug
00880 ***************************************/
00881 /*
00882  * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
00883  * compiler's command line options. The value must be a number.
00884  */
00885 #ifndef XXH_DEBUGLEVEL
00886 #  ifdef DEBUGLEVEL /* backwards compat */
00887 #    define XXH_DEBUGLEVEL DEBUGLEVEL
00888 #  else
00889 #    define XXH_DEBUGLEVEL 0
00890 #  endif
00891 #endif
00892 
00893 #if (XXH_DEBUGLEVEL>=1)
00894 #  include <assert.h>   /* note: can still be disabled with NDEBUG */
00895 #  define XXH_ASSERT(c)   assert(c)
00896 #else
00897 #  define XXH_ASSERT(c)   ((void)0)
00898 #endif
00899 
00900 /* note: use after variable declarations */
00901 #define XXH_STATIC_ASSERT(c)  do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0)
00902 
00903 
00904 /* *************************************
00905 *  Basic Types
00906 ***************************************/
00907 #if !defined (__VMS) \
00908  && (defined (__cplusplus) \
00909  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
00910 # include <stdint.h>
00911   typedef uint8_t xxh_u8;
00912 #else
00913   typedef unsigned char xxh_u8;
00914 #endif
00915 typedef XXH32_hash_t xxh_u32;
00916 
00917 #ifdef XXH_OLD_NAMES
00918 #  define BYTE xxh_u8
00919 #  define U8   xxh_u8
00920 #  define U32  xxh_u32
00921 #endif
00922 
00923 /* ***   Memory access   *** */
00924 
00925 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
00926 /*
00927  * Manual byteshift. Best for old compilers which don't inline memcpy.
00928  * We actually directly use XXH_readLE32 and XXH_readBE32.
00929  */
00930 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
00931 
00932 /*
00933  * Force direct memory access. Only works on CPUs that support unaligned memory
00934  * access in hardware.
00935  */
00936 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
00937 
00938 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
00939 
00940 /*
00941  * __pack instructions are safer but compiler specific, hence potentially
00942  * problematic for some compilers.
00943  *
00944  * Currently only defined for GCC and ICC.
00945  */
00946 #ifdef XXH_OLD_NAMES
00947 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
00948 #endif
00949 static xxh_u32 XXH_read32(const void* ptr)
00950 {
00951     typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
00952     return ((const xxh_unalign*)ptr)->u32;
00953 }
00954 
00955 #else
00956 
00957 /*
00958  * Portable and safe solution. Generally efficient.
00959  * see: https://stackoverflow.com/a/32095106/646947
00960  */
00961 static xxh_u32 XXH_read32(const void* memPtr)
00962 {
00963     xxh_u32 val;
00964     memcpy(&val, memPtr, sizeof(val));
00965     return val;
00966 }
00967 
00968 #endif   /* XXH_FORCE_MEMORY_ACCESS */
00969 
00970 
00971 /* ***   Endianness   *** */
00972 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
00973 
00982 #ifndef XXH_CPU_LITTLE_ENDIAN
00983 /*
00984  * Try to detect endianness automatically, to avoid the nonstandard behavior
00985  * in `XXH_isLittleEndian()`
00986  */
00987 #  if defined(_WIN32) /* Windows is always little endian */ \
00988      || defined(__LITTLE_ENDIAN__) \
00989      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
00990 #    define XXH_CPU_LITTLE_ENDIAN 1
00991 #  elif defined(__BIG_ENDIAN__) \
00992      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
00993 #    define XXH_CPU_LITTLE_ENDIAN 0
00994 #  else
00995 static int XXH_isLittleEndian(void)
00996 {
00997     /*
00998      * Nonstandard, but well-defined behavior in practice.
00999      * Don't use static: it is detrimental to performance.
01000      */
01001     const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
01002     return one.c[0];
01003 }
01004 #   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
01005 #  endif
01006 #endif
01007 
01008 
01009 
01010 
01011 /* ****************************************
01012 *  Compiler-specific Functions and Macros
01013 ******************************************/
01014 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
01015 
01016 #ifdef __has_builtin
01017 #  define XXH_HAS_BUILTIN(x) __has_builtin(x)
01018 #else
01019 #  define XXH_HAS_BUILTIN(x) 0
01020 #endif
01021 
01022 #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
01023                                && XXH_HAS_BUILTIN(__builtin_rotateleft64)
01024 #  define XXH_rotl32 __builtin_rotateleft32
01025 #  define XXH_rotl64 __builtin_rotateleft64
01026 /* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
01027 #elif defined(_MSC_VER)
01028 #  define XXH_rotl32(x,r) _rotl(x,r)
01029 #  define XXH_rotl64(x,r) _rotl64(x,r)
01030 #else
01031 #  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
01032 #  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
01033 #endif
01034 
01035 #if defined(_MSC_VER)     /* Visual Studio */
01036 #  define XXH_swap32 _byteswap_ulong
01037 #elif XXH_GCC_VERSION >= 403
01038 #  define XXH_swap32 __builtin_bswap32
01039 #else
01040 static xxh_u32 XXH_swap32 (xxh_u32 x)
01041 {
01042     return  ((x << 24) & 0xff000000 ) |
01043             ((x <<  8) & 0x00ff0000 ) |
01044             ((x >>  8) & 0x0000ff00 ) |
01045             ((x >> 24) & 0x000000ff );
01046 }
01047 #endif
01048 
01049 
01050 /* ***************************
01051 *  Memory reads
01052 *****************************/
01053 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
01054 
01055 /*
01056  * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
01057  *
01058  * This is ideal for older compilers which don't inline memcpy.
01059  */
01060 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
01061 
01062 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
01063 {
01064     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
01065     return bytePtr[0]
01066          | ((xxh_u32)bytePtr[1] << 8)
01067          | ((xxh_u32)bytePtr[2] << 16)
01068          | ((xxh_u32)bytePtr[3] << 24);
01069 }
01070 
01071 XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
01072 {
01073     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
01074     return bytePtr[3]
01075          | ((xxh_u32)bytePtr[2] << 8)
01076          | ((xxh_u32)bytePtr[1] << 16)
01077          | ((xxh_u32)bytePtr[0] << 24);
01078 }
01079 
01080 #else
01081 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
01082 {
01083     return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
01084 }
01085 
01086 static xxh_u32 XXH_readBE32(const void* ptr)
01087 {
01088     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
01089 }
01090 #endif
01091 
01092 XXH_FORCE_INLINE xxh_u32
01093 XXH_readLE32_align(const void* ptr, XXH_alignment align)
01094 {
01095     if (align==XXH_unaligned) {
01096         return XXH_readLE32(ptr);
01097     } else {
01098         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
01099     }
01100 }
01101 
01102 
01103 /* *************************************
01104 *  Misc
01105 ***************************************/
01106 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
01107 
01108 
01109 /* *******************************************************************
01110 *  32-bit hash functions
01111 *********************************************************************/
01112 static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U;   /* 0b10011110001101110111100110110001 */
01113 static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U;   /* 0b10000101111010111100101001110111 */
01114 static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU;   /* 0b11000010101100101010111000111101 */
01115 static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU;   /* 0b00100111110101001110101100101111 */
01116 static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U;   /* 0b00010110010101100110011110110001 */
01117 
01118 #ifdef XXH_OLD_NAMES
01119 #  define PRIME32_1 XXH_PRIME32_1
01120 #  define PRIME32_2 XXH_PRIME32_2
01121 #  define PRIME32_3 XXH_PRIME32_3
01122 #  define PRIME32_4 XXH_PRIME32_4
01123 #  define PRIME32_5 XXH_PRIME32_5
01124 #endif
01125 
01126 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
01127 {
01128     acc += input * XXH_PRIME32_2;
01129     acc  = XXH_rotl32(acc, 13);
01130     acc *= XXH_PRIME32_1;
01131 #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
01132     /*
01133      * UGLY HACK:
01134      * This inline assembly hack forces acc into a normal register. This is the
01135      * only thing that prevents GCC and Clang from autovectorizing the XXH32
01136      * loop (pragmas and attributes don't work for some reason) without globally
01137      * disabling SSE4.1.
01138      *
01139      * The reason we want to avoid vectorization is because despite working on
01140      * 4 integers at a time, there are multiple factors slowing XXH32 down on
01141      * SSE4:
01142      * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
01143      *   newer chips!) making it slightly slower to multiply four integers at
01144      *   once compared to four integers independently. Even on Sandy/Ivy
01145      *   Bridge, where pmulld was fastest, it is still not worth going into
01146      *   SSE just to multiply unless doing a long operation.
01147      *
01148      * - Four instructions are required to rotate,
01149      *      movdqa tmp,  v // not required with VEX encoding
01150      *      pslld  tmp, 13 // tmp <<= 13
01151      *      psrld  v,   19 // x >>= 19
01152      *      por    v,  tmp // x |= tmp
01153      *   compared to one for scalar:
01154      *      roll   v, 13    // reliably fast across the board
01155      *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
01156      *
01157      * - Instruction-level parallelism is actually more beneficial here
01158      *   because SIMD serializes this operation: while v1 is rotating, v2
01159      *   can load data and v3 can multiply. SSE forces them to operate
01160      *   together.
01161      *
01162      * How this hack works:
01163      * __asm__(""       // Declare an assembly block but don't declare any instructions
01164      *          :       // However, as an Input/Output Operand,
01165      *          "+r"    // constrain a read/write operand (+) as a general purpose register (r).
01166      *          (acc)   // and set acc as the operand
01167      * );
01168      *
01169      * Because of the 'r', the compiler has promised that acc will be in a
01170      * general purpose register and the '+' says that it will be 'read/write',
01171      * so it has to assume it has changed. It is like volatile without all the
01172      * loads and stores.
01173      *
01174      * Since the argument has to be in a normal register (not an SSE register),
01175      * each time XXH32_round is called, it is impossible to vectorize.
01176      */
01177     __asm__("" : "+r" (acc));
01178 #endif
01179     return acc;
01180 }
01181 
01182 /* mix all bits */
01183 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
01184 {
01185     h32 ^= h32 >> 15;
01186     h32 *= XXH_PRIME32_2;
01187     h32 ^= h32 >> 13;
01188     h32 *= XXH_PRIME32_3;
01189     h32 ^= h32 >> 16;
01190     return(h32);
01191 }
01192 
01193 #define XXH_get32bits(p) XXH_readLE32_align(p, align)
01194 
01195 static xxh_u32
01196 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
01197 {
01198 #define XXH_PROCESS1 do {                           \
01199     h32 += (*ptr++) * XXH_PRIME32_5;                \
01200     h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;      \
01201 } while (0)
01202 
01203 #define XXH_PROCESS4 do {                           \
01204     h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;      \
01205     ptr += 4;                                       \
01206     h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;     \
01207 } while (0)
01208 
01209     /* Compact rerolled version */
01210     if (XXH_REROLL) {
01211         len &= 15;
01212         while (len >= 4) {
01213             XXH_PROCESS4;
01214             len -= 4;
01215         }
01216         while (len > 0) {
01217             XXH_PROCESS1;
01218             --len;
01219         }
01220         return XXH32_avalanche(h32);
01221     } else {
01222          switch(len&15) /* or switch(bEnd - p) */ {
01223            case 12:      XXH_PROCESS4;
01224                          /* fallthrough */
01225            case 8:       XXH_PROCESS4;
01226                          /* fallthrough */
01227            case 4:       XXH_PROCESS4;
01228                          return XXH32_avalanche(h32);
01229 
01230            case 13:      XXH_PROCESS4;
01231                          /* fallthrough */
01232            case 9:       XXH_PROCESS4;
01233                          /* fallthrough */
01234            case 5:       XXH_PROCESS4;
01235                          XXH_PROCESS1;
01236                          return XXH32_avalanche(h32);
01237 
01238            case 14:      XXH_PROCESS4;
01239                          /* fallthrough */
01240            case 10:      XXH_PROCESS4;
01241                          /* fallthrough */
01242            case 6:       XXH_PROCESS4;
01243                          XXH_PROCESS1;
01244                          XXH_PROCESS1;
01245                          return XXH32_avalanche(h32);
01246 
01247            case 15:      XXH_PROCESS4;
01248                          /* fallthrough */
01249            case 11:      XXH_PROCESS4;
01250                          /* fallthrough */
01251            case 7:       XXH_PROCESS4;
01252                          /* fallthrough */
01253            case 3:       XXH_PROCESS1;
01254                          /* fallthrough */
01255            case 2:       XXH_PROCESS1;
01256                          /* fallthrough */
01257            case 1:       XXH_PROCESS1;
01258                          /* fallthrough */
01259            case 0:       return XXH32_avalanche(h32);
01260         }
01261         XXH_ASSERT(0);
01262         return h32;   /* reaching this point is deemed impossible */
01263     }
01264 }
01265 
01266 #ifdef XXH_OLD_NAMES
01267 #  define PROCESS1 XXH_PROCESS1
01268 #  define PROCESS4 XXH_PROCESS4
01269 #else
01270 #  undef XXH_PROCESS1
01271 #  undef XXH_PROCESS4
01272 #endif
01273 
01274 XXH_FORCE_INLINE xxh_u32
01275 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
01276 {
01277     const xxh_u8* bEnd = input + len;
01278     xxh_u32 h32;
01279 
01280 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
01281     if (input==NULL) {
01282         len=0;
01283         bEnd=input=(const xxh_u8*)(size_t)16;
01284     }
01285 #endif
01286 
01287     if (len>=16) {
01288         const xxh_u8* const limit = bEnd - 15;
01289         xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
01290         xxh_u32 v2 = seed + XXH_PRIME32_2;
01291         xxh_u32 v3 = seed + 0;
01292         xxh_u32 v4 = seed - XXH_PRIME32_1;
01293 
01294         do {
01295             v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
01296             v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
01297             v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
01298             v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
01299         } while (input < limit);
01300 
01301         h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
01302             + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
01303     } else {
01304         h32  = seed + XXH_PRIME32_5;
01305     }
01306 
01307     h32 += (xxh_u32)len;
01308 
01309     return XXH32_finalize(h32, input, len&15, align);
01310 }
01311 
01312 
01313 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
01314 {
01315 #if 0
01316     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
01317     XXH32_state_t state;
01318     XXH32_reset(&state, seed);
01319     XXH32_update(&state, (const xxh_u8*)input, len);
01320     return XXH32_digest(&state);
01321 
01322 #else
01323 
01324     if (XXH_FORCE_ALIGN_CHECK) {
01325         if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
01326             return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
01327     }   }
01328 
01329     return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
01330 #endif
01331 }
01332 
01333 
01334 
01335 /*******   Hash streaming   *******/
01336 
01337 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
01338 {
01339     return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
01340 }
01341 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
01342 {
01343     XXH_free(statePtr);
01344     return XXH_OK;
01345 }
01346 
01347 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
01348 {
01349     memcpy(dstState, srcState, sizeof(*dstState));
01350 }
01351 
01352 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
01353 {
01354     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
01355     memset(&state, 0, sizeof(state));
01356     state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
01357     state.v2 = seed + XXH_PRIME32_2;
01358     state.v3 = seed + 0;
01359     state.v4 = seed - XXH_PRIME32_1;
01360     /* do not write into reserved, planned to be removed in a future version */
01361     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
01362     return XXH_OK;
01363 }
01364 
01365 
01366 XXH_PUBLIC_API XXH_errorcode
01367 XXH32_update(XXH32_state_t* state, const void* input, size_t len)
01368 {
01369     if (input==NULL)
01370 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
01371         return XXH_OK;
01372 #else
01373         return XXH_ERROR;
01374 #endif
01375 
01376     {   const xxh_u8* p = (const xxh_u8*)input;
01377         const xxh_u8* const bEnd = p + len;
01378 
01379         state->total_len_32 += (XXH32_hash_t)len;
01380         state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
01381 
01382         if (state->memsize + len < 16)  {   /* fill in tmp buffer */
01383             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
01384             state->memsize += (XXH32_hash_t)len;
01385             return XXH_OK;
01386         }
01387 
01388         if (state->memsize) {   /* some data left from previous update */
01389             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
01390             {   const xxh_u32* p32 = state->mem32;
01391                 state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
01392                 state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
01393                 state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
01394                 state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
01395             }
01396             p += 16-state->memsize;
01397             state->memsize = 0;
01398         }
01399 
01400         if (p <= bEnd-16) {
01401             const xxh_u8* const limit = bEnd - 16;
01402             xxh_u32 v1 = state->v1;
01403             xxh_u32 v2 = state->v2;
01404             xxh_u32 v3 = state->v3;
01405             xxh_u32 v4 = state->v4;
01406 
01407             do {
01408                 v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
01409                 v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
01410                 v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
01411                 v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
01412             } while (p<=limit);
01413 
01414             state->v1 = v1;
01415             state->v2 = v2;
01416             state->v3 = v3;
01417             state->v4 = v4;
01418         }
01419 
01420         if (p < bEnd) {
01421             XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
01422             state->memsize = (unsigned)(bEnd-p);
01423         }
01424     }
01425 
01426     return XXH_OK;
01427 }
01428 
01429 
01430 XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
01431 {
01432     xxh_u32 h32;
01433 
01434     if (state->large_len) {
01435         h32 = XXH_rotl32(state->v1, 1)
01436             + XXH_rotl32(state->v2, 7)
01437             + XXH_rotl32(state->v3, 12)
01438             + XXH_rotl32(state->v4, 18);
01439     } else {
01440         h32 = state->v3 /* == seed */ + XXH_PRIME32_5;
01441     }
01442 
01443     h32 += state->total_len_32;
01444 
01445     return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
01446 }
01447 
01448 
01449 /*******   Canonical representation   *******/
01450 
01451 /*
01452  * The default return values from XXH functions are unsigned 32 and 64 bit
01453  * integers.
01454  *
01455  * The canonical representation uses big endian convention, the same convention
01456  * as human-readable numbers (large digits first).
01457  *
01458  * This way, hash values can be written into a file or buffer, remaining
01459  * comparable across different systems.
01460  *
01461  * The following functions allow transformation of hash values to and from their
01462  * canonical format.
01463  */
01464 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
01465 {
01466     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
01467     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
01468     memcpy(dst, &hash, sizeof(*dst));
01469 }
01470 
01471 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
01472 {
01473     return XXH_readBE32(src);
01474 }
01475 
01476 
01477 #ifndef XXH_NO_LONG_LONG
01478 
01479 /* *******************************************************************
01480 *  64-bit hash functions
01481 *********************************************************************/
01482 
01483 /*******   Memory access   *******/
01484 
01485 typedef XXH64_hash_t xxh_u64;
01486 
01487 #ifdef XXH_OLD_NAMES
01488 #  define U64 xxh_u64
01489 #endif
01490 
01507 #ifndef XXH_REROLL_XXH64
01508 #  if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
01509    || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
01510      || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
01511      || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
01512      || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
01513    || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
01514 #    define XXH_REROLL_XXH64 1
01515 #  else
01516 #    define XXH_REROLL_XXH64 0
01517 #  endif
01518 #endif /* !defined(XXH_REROLL_XXH64) */
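/*
 * Note: thanks to the #ifndef guard above, this auto-detection can be
 * overridden by defining XXH_REROLL_XXH64 to 0 or 1 before including this
 * header (e.g. -DXXH_REROLL_XXH64=1 to force the smaller rerolled finalizer
 * even on a 64-bit target).
 */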
01519 
01520 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
01521 /*
01522  * Manual byteshift. Best for old compilers which don't inline memcpy.
01523  * In this mode, XXH_readLE64 and XXH_readBE64 are used directly.
01524  */
01525 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
01526 
01527 /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
01528 static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; }
01529 
01530 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
01531 
01532 /*
01533  * __pack instructions are safer, but compiler-specific, hence potentially
01534  * problematic for some compilers.
01535  *
01536  * Currently only defined for GCC and ICC.
01537  */
01538 #ifdef XXH_OLD_NAMES
01539 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
01540 #endif
01541 static xxh_u64 XXH_read64(const void* ptr)
01542 {
01543     typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
01544     return ((const xxh_unalign64*)ptr)->u64;
01545 }
01546 
01547 #else
01548 
01549 /*
01550  * Portable and safe solution. Generally efficient.
01551  * see: https://stackoverflow.com/a/32095106/646947
01552  */
01553 static xxh_u64 XXH_read64(const void* memPtr)
01554 {
01555     xxh_u64 val;
01556     memcpy(&val, memPtr, sizeof(val));
01557     return val;
01558 }
01559 
01560 #endif   /* XXH_FORCE_MEMORY_ACCESS */
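/*
 * The memcpy() form is the strictly conforming way to load from a possibly
 * misaligned address; mainstream compilers typically fold it into a single
 * load instruction. Illustrative sketch with a hypothetical buffer:
 *
 *     unsigned char buf[16] = {0};
 *     xxh_u64 const v = XXH_read64(buf + 3);
 *
 * The offset of 3 makes the address misaligned: well-defined through
 * memcpy(), undefined behavior through a raw (const xxh_u64*) cast.
 */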
01561 
01562 #if defined(_MSC_VER)     /* Visual Studio */
01563 #  define XXH_swap64 _byteswap_uint64
01564 #elif XXH_GCC_VERSION >= 403
01565 #  define XXH_swap64 __builtin_bswap64
01566 #else
01567 static xxh_u64 XXH_swap64 (xxh_u64 x)
01568 {
01569     return  ((x << 56) & 0xff00000000000000ULL) |
01570             ((x << 40) & 0x00ff000000000000ULL) |
01571             ((x << 24) & 0x0000ff0000000000ULL) |
01572             ((x << 8)  & 0x000000ff00000000ULL) |
01573             ((x >> 8)  & 0x00000000ff000000ULL) |
01574             ((x >> 24) & 0x0000000000ff0000ULL) |
01575             ((x >> 40) & 0x000000000000ff00ULL) |
01576             ((x >> 56) & 0x00000000000000ffULL);
01577 }
01578 #endif
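/*
 * Worked example of the fallback byteswap: each (shift, mask) pair moves one
 * byte to its mirrored position, so
 *
 *     XXH_swap64(0x0123456789ABCDEFULL) == 0xEFCDAB8967452301ULL
 */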
01579 
01580 
01581 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
01582 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
01583 
01584 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
01585 {
01586     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
01587     return bytePtr[0]
01588          | ((xxh_u64)bytePtr[1] << 8)
01589          | ((xxh_u64)bytePtr[2] << 16)
01590          | ((xxh_u64)bytePtr[3] << 24)
01591          | ((xxh_u64)bytePtr[4] << 32)
01592          | ((xxh_u64)bytePtr[5] << 40)
01593          | ((xxh_u64)bytePtr[6] << 48)
01594          | ((xxh_u64)bytePtr[7] << 56);
01595 }
01596 
01597 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
01598 {
01599     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
01600     return bytePtr[7]
01601          | ((xxh_u64)bytePtr[6] << 8)
01602          | ((xxh_u64)bytePtr[5] << 16)
01603          | ((xxh_u64)bytePtr[4] << 24)
01604          | ((xxh_u64)bytePtr[3] << 32)
01605          | ((xxh_u64)bytePtr[2] << 40)
01606          | ((xxh_u64)bytePtr[1] << 48)
01607          | ((xxh_u64)bytePtr[0] << 56);
01608 }
01609 
01610 #else
01611 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
01612 {
01613     return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
01614 }
01615 
01616 static xxh_u64 XXH_readBE64(const void* ptr)
01617 {
01618     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
01619 }
01620 #endif
01621 
01622 XXH_FORCE_INLINE xxh_u64
01623 XXH_readLE64_align(const void* ptr, XXH_alignment align)
01624 {
01625     if (align==XXH_unaligned)
01626         return XXH_readLE64(ptr);
01627     else
01628         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
01629 }
01630 
01631 
01632 /*******   xxh64   *******/
01633 
01634 static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
01635 static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
01636 static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
01637 static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
01638 static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
01639 
01640 #ifdef XXH_OLD_NAMES
01641 #  define PRIME64_1 XXH_PRIME64_1
01642 #  define PRIME64_2 XXH_PRIME64_2
01643 #  define PRIME64_3 XXH_PRIME64_3
01644 #  define PRIME64_4 XXH_PRIME64_4
01645 #  define PRIME64_5 XXH_PRIME64_5
01646 #endif
01647 
01648 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
01649 {
01650     acc += input * XXH_PRIME64_2;
01651     acc  = XXH_rotl64(acc, 31);
01652     acc *= XXH_PRIME64_1;
01653     return acc;
01654 }
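/*
 * Each round folds 8 input bytes into one lane accumulator:
 *
 *     acc' = rotl64(acc + input * PRIME64_2, 31) * PRIME64_1
 *
 * The multiply / rotate / multiply structure is what carries low input bits
 * into the high half of the accumulator and vice versa.
 */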
01655 
01656 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
01657 {
01658     val  = XXH64_round(0, val);
01659     acc ^= val;
01660     acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
01661     return acc;
01662 }
01663 
01664 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
01665 {
01666     h64 ^= h64 >> 33;
01667     h64 *= XXH_PRIME64_2;
01668     h64 ^= h64 >> 29;
01669     h64 *= XXH_PRIME64_3;
01670     h64 ^= h64 >> 32;
01671     return h64;
01672 }
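/*
 * The avalanche is the final mixing step: without it, inputs differing only
 * in their last few bytes would produce hashes differing only in a few bit
 * positions. After these three xorshift/multiply steps, a single-bit input
 * change is expected to flip about half of the 64 output bits.
 */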
01673 
01674 
01675 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
01676 
01677 static xxh_u64
01678 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
01679 {
01680 #define XXH_PROCESS1_64 do {                                   \
01681     h64 ^= (*ptr++) * XXH_PRIME64_5;                           \
01682     h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;                 \
01683 } while (0)
01684 
01685 #define XXH_PROCESS4_64 do {                                   \
01686     h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;      \
01687     ptr += 4;                                              \
01688     h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;     \
01689 } while (0)
01690 
01691 #define XXH_PROCESS8_64 do {                                   \
01692     xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
01693     ptr += 8;                                              \
01694     h64 ^= k1;                                             \
01695     h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;     \
01696 } while (0)
01697 
01698     /* The rerolled version for 32-bit targets is faster and much smaller. */
01699     if (XXH_REROLL || XXH_REROLL_XXH64) {
01700         len &= 31;
01701         while (len >= 8) {
01702             XXH_PROCESS8_64;
01703             len -= 8;
01704         }
01705         if (len >= 4) {
01706             XXH_PROCESS4_64;
01707             len -= 4;
01708         }
01709         while (len > 0) {
01710             XXH_PROCESS1_64;
01711             --len;
01712         }
01713         return XXH64_avalanche(h64);
01714     } else {
01715         switch(len & 31) {
01716            case 24: XXH_PROCESS8_64;
01717                          /* fallthrough */
01718            case 16: XXH_PROCESS8_64;
01719                          /* fallthrough */
01720            case  8: XXH_PROCESS8_64;
01721                     return XXH64_avalanche(h64);
01722 
01723            case 28: XXH_PROCESS8_64;
01724                          /* fallthrough */
01725            case 20: XXH_PROCESS8_64;
01726                          /* fallthrough */
01727            case 12: XXH_PROCESS8_64;
01728                          /* fallthrough */
01729            case  4: XXH_PROCESS4_64;
01730                     return XXH64_avalanche(h64);
01731 
01732            case 25: XXH_PROCESS8_64;
01733                          /* fallthrough */
01734            case 17: XXH_PROCESS8_64;
01735                          /* fallthrough */
01736            case  9: XXH_PROCESS8_64;
01737                     XXH_PROCESS1_64;
01738                     return XXH64_avalanche(h64);
01739 
01740            case 29: XXH_PROCESS8_64;
01741                          /* fallthrough */
01742            case 21: XXH_PROCESS8_64;
01743                          /* fallthrough */
01744            case 13: XXH_PROCESS8_64;
01745                          /* fallthrough */
01746            case  5: XXH_PROCESS4_64;
01747                     XXH_PROCESS1_64;
01748                     return XXH64_avalanche(h64);
01749 
01750            case 26: XXH_PROCESS8_64;
01751                          /* fallthrough */
01752            case 18: XXH_PROCESS8_64;
01753                          /* fallthrough */
01754            case 10: XXH_PROCESS8_64;
01755                     XXH_PROCESS1_64;
01756                     XXH_PROCESS1_64;
01757                     return XXH64_avalanche(h64);
01758 
01759            case 30: XXH_PROCESS8_64;
01760                          /* fallthrough */
01761            case 22: XXH_PROCESS8_64;
01762                          /* fallthrough */
01763            case 14: XXH_PROCESS8_64;
01764                          /* fallthrough */
01765            case  6: XXH_PROCESS4_64;
01766                     XXH_PROCESS1_64;
01767                     XXH_PROCESS1_64;
01768                     return XXH64_avalanche(h64);
01769 
01770            case 27: XXH_PROCESS8_64;
01771                          /* fallthrough */
01772            case 19: XXH_PROCESS8_64;
01773                          /* fallthrough */
01774            case 11: XXH_PROCESS8_64;
01775                     XXH_PROCESS1_64;
01776                     XXH_PROCESS1_64;
01777                     XXH_PROCESS1_64;
01778                     return XXH64_avalanche(h64);
01779 
01780            case 31: XXH_PROCESS8_64;
01781                          /* fallthrough */
01782            case 23: XXH_PROCESS8_64;
01783                          /* fallthrough */
01784            case 15: XXH_PROCESS8_64;
01785                          /* fallthrough */
01786            case  7: XXH_PROCESS4_64;
01787                          /* fallthrough */
01788            case  3: XXH_PROCESS1_64;
01789                          /* fallthrough */
01790            case  2: XXH_PROCESS1_64;
01791                          /* fallthrough */
01792            case  1: XXH_PROCESS1_64;
01793                          /* fallthrough */
01794            case  0: return XXH64_avalanche(h64);
01795         }
01796     }
01797     /* impossible to reach */
01798     XXH_ASSERT(0);
01799     return 0;  /* unreachable, but some compilers complain without it */
01800 }
01801 
01802 #ifdef XXH_OLD_NAMES
01803 #  define PROCESS1_64 XXH_PROCESS1_64
01804 #  define PROCESS4_64 XXH_PROCESS4_64
01805 #  define PROCESS8_64 XXH_PROCESS8_64
01806 #else
01807 #  undef XXH_PROCESS1_64
01808 #  undef XXH_PROCESS4_64
01809 #  undef XXH_PROCESS8_64
01810 #endif
01811 
01812 XXH_FORCE_INLINE xxh_u64
01813 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
01814 {
01815     const xxh_u8* bEnd = input + len;
01816     xxh_u64 h64;
01817 
01818 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
01819     if (input==NULL) {
01820         len=0;
01821         bEnd=input=(const xxh_u8*)(size_t)32;
01822     }
01823 #endif
01824 
01825     if (len>=32) {
01826         const xxh_u8* const limit = bEnd - 32;
01827         xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
01828         xxh_u64 v2 = seed + XXH_PRIME64_2;
01829         xxh_u64 v3 = seed + 0;
01830         xxh_u64 v4 = seed - XXH_PRIME64_1;
01831 
01832         do {
01833             v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
01834             v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
01835             v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
01836             v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
01837         } while (input<=limit);
01838 
01839         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
01840         h64 = XXH64_mergeRound(h64, v1);
01841         h64 = XXH64_mergeRound(h64, v2);
01842         h64 = XXH64_mergeRound(h64, v3);
01843         h64 = XXH64_mergeRound(h64, v4);
01844 
01845     } else {
01846         h64  = seed + XXH_PRIME64_5;
01847     }
01848 
01849     h64 += (xxh_u64) len;
01850 
01851     return XXH64_finalize(h64, input, len, align);
01852 }
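/*
 * Shape of the function above: four independent lanes (v1..v4) each consume
 * one 8-byte word per 32-byte stripe, then are folded together with rotations
 * and XXH64_mergeRound(). Inputs shorter than 32 bytes skip the lanes
 * entirely and start from the seeded value seed + XXH_PRIME64_5 before
 * XXH64_finalize() handles the remaining bytes.
 */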
01853 
01854 
01855 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
01856 {
01857 #if 0
01858     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
01859     XXH64_state_t state;
01860     XXH64_reset(&state, seed);
01861     XXH64_update(&state, (const xxh_u8*)input, len);
01862     return XXH64_digest(&state);
01863 
01864 #else
01865 
01866     if (XXH_FORCE_ALIGN_CHECK) {
01867         if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
01868             return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
01869     }   }
01870 
01871     return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
01872 
01873 #endif
01874 }
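/*
 * One-shot usage sketch:
 *
 *     const char msg[] = "Hello";
 *     XXH64_hash_t const h = XXH64(msg, sizeof(msg)-1, 0);
 *
 * A commonly cited sanity check, worth confirming against the official test
 * vectors, is XXH64("", 0, 0) == 0xEF46DB3751D8E999ULL.
 */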
01875 
01876 /*******   Hash Streaming   *******/
01877 
01878 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
01879 {
01880     return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
01881 }
01882 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
01883 {
01884     XXH_free(statePtr);
01885     return XXH_OK;
01886 }
01887 
01888 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
01889 {
01890     memcpy(dstState, srcState, sizeof(*dstState));
01891 }
01892 
01893 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
01894 {
01895     XXH64_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
01896     memset(&state, 0, sizeof(state));
01897     state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
01898     state.v2 = seed + XXH_PRIME64_2;
01899     state.v3 = seed + 0;
01900     state.v4 = seed - XXH_PRIME64_1;
01901     /* do not write into reserved64; it might be removed in a future version */
01902     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
01903     return XXH_OK;
01904 }
01905 
01906 XXH_PUBLIC_API XXH_errorcode
01907 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
01908 {
01909     if (input==NULL)
01910 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
01911         return XXH_OK;
01912 #else
01913         return XXH_ERROR;
01914 #endif
01915 
01916     {   const xxh_u8* p = (const xxh_u8*)input;
01917         const xxh_u8* const bEnd = p + len;
01918 
01919         state->total_len += len;
01920 
01921         if (state->memsize + len < 32) {  /* fill in tmp buffer */
01922             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
01923             state->memsize += (xxh_u32)len;
01924             return XXH_OK;
01925         }
01926 
01927         if (state->memsize) {   /* tmp buffer is full */
01928             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
01929             state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
01930             state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
01931             state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
01932             state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
01933             p += 32-state->memsize;
01934             state->memsize = 0;
01935         }
01936 
01937         if (p+32 <= bEnd) {
01938             const xxh_u8* const limit = bEnd - 32;
01939             xxh_u64 v1 = state->v1;
01940             xxh_u64 v2 = state->v2;
01941             xxh_u64 v3 = state->v3;
01942             xxh_u64 v4 = state->v4;
01943 
01944             do {
01945                 v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
01946                 v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
01947                 v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
01948                 v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
01949             } while (p<=limit);
01950 
01951             state->v1 = v1;
01952             state->v2 = v2;
01953             state->v3 = v3;
01954             state->v4 = v4;
01955         }
01956 
01957         if (p < bEnd) {
01958             XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
01959             state->memsize = (unsigned)(bEnd-p);
01960         }
01961     }
01962 
01963     return XXH_OK;
01964 }
01965 
01966 
01967 XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
01968 {
01969     xxh_u64 h64;
01970 
01971     if (state->total_len >= 32) {
01972         xxh_u64 const v1 = state->v1;
01973         xxh_u64 const v2 = state->v2;
01974         xxh_u64 const v3 = state->v3;
01975         xxh_u64 const v4 = state->v4;
01976 
01977         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
01978         h64 = XXH64_mergeRound(h64, v1);
01979         h64 = XXH64_mergeRound(h64, v2);
01980         h64 = XXH64_mergeRound(h64, v3);
01981         h64 = XXH64_mergeRound(h64, v4);
01982     } else {
01983         h64  = state->v3 /*seed*/ + XXH_PRIME64_5;
01984     }
01985 
01986     h64 += (xxh_u64) state->total_len;
01987 
01988     return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
01989 }
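/*
 * The streaming path is bit-for-bit equivalent to the one-shot function:
 * splitting the input across any number of XXH64_update() calls yields the
 * same digest. Sketch, assuming hypothetical data/len:
 *
 *     XXH64_state_t* const st = XXH64_createState();
 *     XXH64_reset(st, 0);
 *     XXH64_update(st, data, len/2);
 *     XXH64_update(st, (const char*)data + len/2, len - len/2);
 *     XXH64_hash_t const h = XXH64_digest(st);
 *     XXH64_freeState(st);
 *
 * Here h equals XXH64(data, len, 0).
 */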
01990 
01991 
01992 /*******   Canonical representation   *******/
01993 
01994 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
01995 {
01996     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
01997     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
01998     memcpy(dst, &hash, sizeof(*dst));
01999 }
02000 
02001 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
02002 {
02003     return XXH_readBE64(src);
02004 }
02005 
02006 
02007 
02008 /* *********************************************************************
02009 *  XXH3
02010 *  New-generation hash designed for speed on small keys and vectorization
02011 ************************************************************************ */
02012 
02013 #include "xxh3.h"
02014 
02015 
02016 #endif  /* XXH_NO_LONG_LONG */
02017 
02018 
02019 #endif  /* XXH_IMPLEMENTATION */
02020 
02021 
02022 #if defined (__cplusplus)
02023 }
02024 #endif