
xxhash.h (205 KB)

/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/
#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
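/*
 * For illustration, a minimal sketch (the unit name and `hashKey` with its
 * 16-byte key length are hypothetical, not part of this header): with
 * XXH_INLINE_ALL, a length known at compile time lets the compiler
 * specialize the inlined hash.
 *
 *     // my_unit.c
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashKey(const void* key)
 *     {
 *         return XXH3_64bits(key, 16);  // constant length: fully specialized
 *     }
 */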
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
   && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */

/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */

/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif
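/*
 * For example, a minimal sketch (the `MYLIB_` prefix is hypothetical):
 *
 *     #define XXH_NAMESPACE MYLIB_
 *     #include "xxhash.h"
 *
 * The library then exports MYLIB_XXH32, MYLIB_XXH64, etc., while calling code
 * keeps writing XXH32(...) unchanged, since the macros below rewrite the names.
 */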
#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif

/* *************************************
 *  Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to header file.
 *
 * @return `XXH_VERSION_NUMBER` of the invoked library.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
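/*
 * For instance, a sketch of decoding the packed version number, following the
 * XXH_VERSION_NUMBER formula above (v0.8.1 yields 801):
 *
 *     unsigned v       = XXH_versionNumber();
 *     unsigned major   = v / (100*100);
 *     unsigned minor   = (v / 100) % 100;
 *     unsigned release = v % 100;
 */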
/* ****************************
 *  Common basic types
 ******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;

/*-**********************************************************************
 *  32-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;

#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
#     if ULONG_MAX == 0xFFFFFFFFUL
        typedef unsigned long XXH32_hash_t;
#     else
#       error "unsupported platform: need a 32-bit type"
#     endif
#   endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that @ref xxh3_family provides competitive speed
 *   for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
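/*
 * Example, a minimal one-shot sketch:
 *
 *     const char msg[] = "Hello World";
 *     XXH32_hash_t h = XXH32(msg, sizeof(msg) - 1, 0);   // seed = 0
 */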
/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the hash of all input consumed so far, as an unsigned
 * 32-bit or 64-bit integer depending on the variant.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *   #include <stdio.h>
 *   #include <assert.h>
 *   #include <xxhash.h>
 *   #define BUFFER_SIZE 256
 *
 *   // Note: XXH64 and XXH3 use the same interface.
 *   XXH32_hash_t
 *   hashFile(FILE* stream)
 *   {
 *       XXH32_state_t* state;
 *       unsigned char buf[BUFFER_SIZE];
 *       size_t amt;
 *       XXH32_hash_t hash;
 *
 *       state = XXH32_createState();       // Create a state
 *       assert(state != NULL);             // Error check here
 *       XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *       while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *           XXH32_update(state, buf, amt); // Hash the file in chunks
 *       }
 *       hash = XXH32_digest(state);        // Finalize the hash
 *       XXH32_freeState(state);            // Clean up
 *       return hash;
 *   }
 * @endcode
 */
/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of what is the order on the byte level,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
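/*
 * Example, a sketch of a portable store/load round-trip (`f` is a hypothetical
 * FILE* opened in binary mode):
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);                  // native -> big endian
 *     fwrite(c.digest, 1, sizeof(c.digest), f);        // byte order is now fixed
 *     // ... later, possibly on a different machine:
 *     fread(c.digest, 1, sizeof(c.digest), f);
 *     XXH32_hash_t h2 = XXH32_hashFromCanonical(&c);   // big endian -> native
 */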
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
 * Define XXH_FALLTHROUGH macro for annotating switch cases with the
 * 'fallthrough' attribute introduced in C++17 and C23.
 * C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
# define XXH_FALLTHROUGH
#endif
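/*
 * Example, a sketch of the intended usage: silencing implicit-fallthrough
 * warnings on deliberately shared switch cases (`acc`, `mix`, and `p` are
 * placeholders, not part of this header).
 *
 *     switch (len % 4) {
 *     case 3: acc = mix(acc, p[2]);
 *             XXH_FALLTHROUGH;
 *     case 2: acc = mix(acc, p[1]);
 *             XXH_FALLTHROUGH;
 *     case 1: acc = mix(acc, p[0]);
 *     }
 */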
/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 *  64-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */

/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
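/*
 * Example, a minimal one-shot sketch (same shape as XXH32, with a 64-bit seed):
 *
 *     const char msg[] = "Hello World";
 *     XXH64_hash_t h = XXH64(msg, sizeof(msg) - 1, 0);
 */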
/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately 2x faster on large
 * inputs and >3x faster on small ones, though exact differences vary
 * depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */
/*-**********************************************************************
 *  XXH3 64-bit variant
 ************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
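/*
 * Example, a sketch of the two call styles above:
 *
 *     XXH64_hash_t a = XXH3_64bits(data, len);               // default seed (0)
 *     XXH64_hash_t b = XXH3_64bits_withSeed(data, len, 42);  // custom seed
 *     // a == XXH3_64bits_withSeed(data, len, 0), per the note above
 */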
/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing "XXH3_generateSecret()" instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
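/*
 * Example, a sketch of hashing with a caller-provided secret (`mySecret` is
 * a hypothetical buffer, assumed filled with high-entropy bytes):
 *
 *     unsigned char mySecret[XXH3_SECRET_SIZE_MIN];   // >= 136 bytes, required
 *     // ... fill mySecret with random-looking bytes ...
 *     XXH64_hash_t h = XXH3_64bits_withSecret(data, len, mySecret, sizeof(mySecret));
 */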
/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, it _must outlive_ the hash streaming session.
 * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);
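/*
 * Example (illustrative sketch, not part of the library API):
 * hashing data that arrives in chunks. The two-chunk input below is
 * hypothetical, introduced for this example only.
 *
 *     XXH3_state_t* const state = XXH3_createState();
 *     if (state != NULL && XXH3_64bits_reset(state) == XXH_OK) {
 *         XXH3_64bits_update(state, "hello ", 6);
 *         XXH3_64bits_update(state, "world", 5);
 *         XXH64_hash_t const h = XXH3_64bits_digest(state);
 *     }
 *     XXH3_freeState(state);
 *
 * The resulting digest equals XXH3_64bits("hello world", 11),
 * regardless of how the input was split across update() calls.
 */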
/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */

/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
/* Following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
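/*
 * Example (illustrative sketch, not part of the library API):
 * sorting an array of 128-bit hashes with stdlib's qsort(), as the
 * comparator documentation above allows. The array `hashes` and its
 * length `n` are hypothetical names introduced for this example only.
 *
 *     #include <stdlib.h>
 *
 *     XXH128_hash_t hashes[16];
 *     size_t const n = sizeof(hashes) / sizeof(hashes[0]);
 *     ... fill `hashes` ...
 *     qsort(hashes, n, sizeof(XXH128_hash_t), XXH128_cmp);
 *
 * After sorting, duplicates can be detected by calling XXH128_isEqual()
 * on adjacent elements.
 */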
/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
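/*
 * Example (illustrative sketch, not part of the library API):
 * serializing a 128-bit hash into a portable byte order, then reading
 * it back. The input "abc" is hypothetical.
 *
 *     XXH128_hash_t const h = XXH3_128bits("abc", 3);
 *     XXH128_canonical_t canon;
 *     XXH128_canonicalFromHash(&canon, h);
 *     ... write canon.digest (16 bytes, big endian) to disk or network ...
 *     XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *
 * XXH128_isEqual(h, back) returns 1: the canonical form round-trips
 * exactly, independently of the host's endianness.
 */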
#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */

#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742

/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH32_state_t */

#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyways */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH64_state_t */

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L))         /* >= C++11 */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64b_update(), XXH3_128b_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192
/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
 * Otherwise it is an opaque type.
 * Never use this definition in combination with dynamic library.
 * This allows fields to safely be changed in the future.
 *
 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
 * Do not allocate this with `malloc()` or `new`,
 * it will not be sufficiently aligned.
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */
#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
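/*
 * Example (illustrative sketch, not part of the library API):
 * using a stack-allocated state instead of XXH3_createState().
 * Requires XXH_STATIC_LINKING_ONLY so that XXH3_state_t is a complete type.
 *
 *     XXH3_state_t state;
 *     XXH3_INITSTATE(&state);
 *     XXH3_64bits_reset_withSeed(&state, 42);
 *     XXH3_64bits_update(&state, "hello", 5);
 *     XXH64_hash_t const h = XXH3_64bits_digest(&state);
 *
 * No XXH3_freeState() is needed: the state lives on the stack, which also
 * satisfies the 64-byte alignment requirement described above.
 */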
/* XXH128() :
 * simple alias to pre-selected XXH3_128bits variant
 */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @secretSize
 * into an already allocated buffer @secretBuffer.
 * @secretSize must be >= XXH3_SECRET_SIZE_MIN
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * XXH3_generateSecret() can be employed to ensure proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all required qualities.
 *
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);
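/*
 * Example (illustrative sketch, not part of the library API):
 * deriving a proper secret from an application-specific string, then
 * hashing with it. The seed string and the input "payload" are
 * hypothetical, introduced for this example only.
 *
 *     unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *     const char customSeed[] = "my-application-v1";
 *     if (XXH3_generateSecret(secret, sizeof(secret),
 *                             customSeed, sizeof(customSeed)-1) == XXH_OK) {
 *         XXH64_hash_t const h = XXH3_64bits_withSecret("payload", 7,
 *                                                       secret, sizeof(secret));
 *     }
 *
 * Per the documentation above, even a low-entropy customSeed yields a
 * secret of suitable quality for the `*_withSecret()` variants.
 */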
/*
 * XXH3_generateSecret_fromSeed():
 *
 * Generate the same secret as the _withSeed() variants.
 *
 * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily).
 * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 * This generator is notably useful in combination with `_withSecretandSeed()`,
 * as a way to emulate a faster `_withSeed()` variant.
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);
/*
 * *_withSecretandSeed() :
 * These variants generate hash values using either
 * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
 * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than _withSeed() variants.
 * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
 *
 * When @secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" inputs,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to swap each bit in the output,
 * via its impact on the seed.
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
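/*
 * Example (illustrative sketch, not part of the library API):
 * emulating a faster `_withSeed()` by expanding the seed into a secret once,
 * then reusing that secret together with the same seed. The names `data`
 * and `len` stand for an arbitrary input and are hypothetical.
 *
 *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *     XXH64_hash_t const seed = 42;
 *     XXH3_generateSecret_fromSeed(secret, seed);
 *     ... later, for each input ...
 *     XXH64_hash_t const h =
 *         XXH3_64bits_withSecretandSeed(data, len, secret, sizeof(secret), seed);
 *
 * As documented above, h matches XXH3_64bits_withSeed(data, len, seed),
 * while skipping the per-call secret generation for large inputs.
 */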
#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */

/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */

/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in /include directory.
 *
 * xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
 *  Tuning parameters
 ***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The below switch allows selection of a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *  @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
 *  @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *  @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *  @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *  .
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2).
 */
#  define XXH_FORCE_MEMORY_ACCESS 0
/*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
 * and XXH64() only).
 *
 * This is an important performance trick for architectures without decent
 * unaligned memory access performance.
 *
 * It checks for input alignment, and when conditions are met, uses a "fast
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
 * faster_ read speed.
 *
 * The check costs one initial branch per hash, which is generally negligible,
 * but not zero.
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * Align check is automatically disabled on x86, x64 & arm64,
 * which are platforms known to offer good unaligned memory access performance.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
 * -fno-inline with GCC or Clang, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0
/*!
 * @def XXH32_ENDJMP
 * @brief Whether to use a jump for `XXH32_finalize`.
 *
 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
 * This is generally preferable for performance,
 * but depending on the exact architecture, a jmp may be preferable.
 *
 * This setting only possibly makes a difference for very small inputs.
 */
#  define XXH32_ENDJMP 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */
/*!
 * @}
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
#  if !defined(__clang__) && \
( \
    (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
    ( \
        defined(__GNUC__) && ( \
            (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
            ( \
                defined(__mips__) && \
                (__mips <= 5 || __mips_isa_rev < 6) && \
                (!defined(__mips16) || defined(__mips_mips16e2)) \
            ) \
        ) \
    ) \
)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
#  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if defined(__OPTIMIZE_SIZE__)  /* -Os, -Oz */ \
   || defined(__NO_INLINE__)      /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif
/*!
 * @defgroup impl Implementation
 * @{
 */

/* *************************************
 *  Includes & Memory related functions
 ***************************************/
/*
 * Modify the local functions below should you wish to use
 * different memory routines for malloc() and free()
 */
#include <stdlib.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than malloc().
 */
static void* XXH_malloc(size_t s) { return malloc(s); }

/*!
 * @internal
 * @brief Modify this function to use a different routine than free().
 */
static void XXH_free(void* p) { free(p); }

#include <string.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than memcpy().
 */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#include <limits.h>   /* ULLONG_MAX */
/* *************************************
 *  Compiler Specific Options
 ***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif
/* *************************************
 *  Debug
 ***************************************/
/*!
 * @ingroup tuning
 * @def XXH_DEBUGLEVEL
 * @brief Sets the debugging level.
 *
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
 * compiler's command line options. The value must be a number.
 */
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   ((void)0)
#endif

/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
#    include <assert.h>
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (eg eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif
/* *************************************
 *  Basic Types
 ***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
  typedef uint8_t xxh_u8;
#else
  typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

/*!
 * @internal
 * @fn xxh_u32 XXH_read32(const void* ptr)
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit native endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit big endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
 * always @ref XXH_alignment::XXH_unaligned.
 *
 * @param ptr The pointer to read from.
 * @param align Whether @p ptr is aligned.
 * @pre
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
 *   aligned.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPU which support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
    return ((const xxh_unalign*)ptr)->u32;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
/* ***   Endianness   *** */

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined,
 * a runtime check (which is usually constant folded) is used instead.
 *
 * @note
 *   This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif
/* ****************************************
 *  Compiler-specific Functions and Macros
 ******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif
/* ***************************
 *  Memory reads
 *****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}
/* *************************************
 *  Misc
 ***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }

/* *******************************************************************
 *  32-bit hash functions
 *********************************************************************/
/*!
 * @}
 * @defgroup xxh32_impl XXH32 implementation
 * @ingroup impl
 * @{
 */
 /* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif

/*!
 * @internal
 * @brief Normal stripe processing routine.
 *
 * This shuffles the bits so that any bit from @p input impacts several bits in
 * @p acc.
 *
 * @param acc The accumulator lane.
 * @param input The stripe of input to mix.
 * @return The mixed accumulator lane.
 */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK:
     * A compiler fence is the only thing that prevents GCC and Clang from
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
     * reason) without globally disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
     *   newer chips!) making it slightly slower to multiply four integers at
     *   once compared to four integers independently. Even when pmulld was
     *   fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
     *   just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movqda tmp,  v // not required with VEX encoding
     *      pslld  tmp, 13 // tmp <<= 13
     *      psrld  v,   19 // x >>= 19
     *      por    v,  tmp // x |= tmp
     *   compared to one for scalar:
     *      roll   v, 13    // reliably fast across the board
     *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because
     *   the SIMD actually serializes this operation: While v1 is rotating, v2
     *   can load data, while v3 can multiply. SSE forces them to operate
     *   together.
     *
     * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
     * and it is pointless writing a NEON implementation that is basically the
     * same speed as scalar for XXH32.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}
/*!
 * @internal
 * @brief Mixes all bits to finalize the hash.
 *
 * The final mix ensures that all input bits have a chance to impact any bit in
 * the output digest, resulting in an unbiased distribution.
 *
 * @param h32 The hash to avalanche.
 * @return The avalanched hash.
 */
static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= XXH_PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= XXH_PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)

/*!
 * @internal
 * @brief Processes the last 0-15 bytes of @p ptr.
 *
 * There may be up to 15 bytes remaining to consume from the input.
 * This final stage will digest them to ensure that all input bytes are present
 * in the final mix.
 *
 * @param h32 The hash to finalize.
 * @param ptr The pointer to the remaining input.
 * @param len The remaining length, modulo 16.
 * @param align Whether @p ptr is aligned.
 * @return The finalized hash.
 */
static xxh_u32
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do {                            \
    h32 += (*ptr++) * XXH_PRIME32_5;                 \
    h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;       \
} while (0)

#define XXH_PROCESS4 do {                            \
    h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;       \
    ptr += 4;                                        \
    h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;      \
} while (0)

    if (ptr==NULL) XXH_ASSERT(len == 0);

    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(h32);
    } else {
         switch(len&15) /* or switch(bEnd - p) */ {
           case 12:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 8:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 4:       XXH_PROCESS4;
                         return XXH32_avalanche(h32);

           case 13:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 9:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 5:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         return XXH32_avalanche(h32);

           case 14:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 10:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 6:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         XXH_PROCESS1;
                         return XXH32_avalanche(h32);

           case 15:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 11:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 7:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 3:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 2:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 1:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 0:       return XXH32_avalanche(h32);
        }
        XXH_ASSERT(0);
        return h32;   /* reaching this point is deemed impossible */
    }
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1 XXH_PROCESS1
#  define PROCESS4 XXH_PROCESS4
#else
#  undef XXH_PROCESS1
#  undef XXH_PROCESS4
#endif
/*!
 * @internal
 * @brief The implementation for @ref XXH32().
 *
 * @param input , len , seed Directly passed from @ref XXH32().
 * @param align Whether @p input is aligned.
 * @return The calculated hash.
 */
XXH_FORCE_INLINE xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=16) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    return XXH32_finalize(h32, input, len&15, align);
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
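/*
 * Example (illustrative sketch, not part of the library API):
 * one-shot 32-bit hashing. The buffer below is hypothetical.
 *
 *     const char buf[] = "Nobody inspects the spammish repetition";
 *     XXH32_hash_t const h = XXH32(buf, sizeof(buf)-1, 0);
 *
 * The same input and seed always yield the same 32-bit value, on any
 * platform and endianness; the aligned/unaligned dispatch above is a
 * pure speed optimization and never changes the result.
 */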
/*******   Hash streaming   *******/
/*!
 * @ingroup xxh32_family
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    state.v[1] = seed + XXH_PRIME32_2;
    state.v[2] = seed + 0;
    state.v[3] = seed - XXH_PRIME32_1;
    /* do not write into reserved, planned to be removed in a future version */
    XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;

            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }
    return XXH_OK;
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    } else {
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
/*******   Canonical representation   *******/

/*!
 * @ingroup xxh32_family
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 *
 * The canonical representation uses big endian convention, the same convention
 * as human-readable numbers (large digits first).
 *
 * This way, hash values can be written into a file or buffer, remaining
 * comparable across different systems.
 *
 * The following functions allow transformation of hash values to and from
 * their canonical format.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
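/*
 * Example (illustrative sketch, not part of the library API):
 * writing a 32-bit hash in a portable byte order. The file handle `f`
 * is hypothetical, introduced for this example only.
 *
 *     XXH32_hash_t const h = XXH32("abc", 3, 0);
 *     XXH32_canonical_t canon;
 *     XXH32_canonicalFromHash(&canon, h);
 *     fwrite(canon.digest, 1, sizeof(canon.digest), f);
 *
 * A reader on any platform can recover the original value with
 * XXH32_hashFromCanonical(&canon), since the canonical form is
 * always big endian.
 */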
#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
 *  64-bit hash functions
 *********************************************************************/
/*!
 * @}
 * @ingroup impl
 * @{
 */
/*******   Memory access   *******/

typedef XXH64_hash_t xxh_u64;

#ifdef XXH_OLD_NAMES
#  define U64 xxh_u64
#endif

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer, but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
    return ((const xxh_unalign64*)ptr)->u64;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return ((x << 56) & 0xff00000000000000ULL) |
           ((x << 40) & 0x00ff000000000000ULL) |
           ((x << 24) & 0x0000ff0000000000ULL) |
           ((x << 8)  & 0x000000ff00000000ULL) |
           ((x >> 8)  & 0x00000000ff000000ULL) |
           ((x >> 24) & 0x0000000000ff0000ULL) |
           ((x >> 40) & 0x000000000000ff00ULL) |
           ((x >> 56) & 0x00000000000000ffULL);
}
#endif
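/*
 * For example (illustrative): XXH_swap64(0x0123456789ABCDEFULL) returns
 * 0xEFCDAB8967452301ULL. The eight bytes are mirrored, which converts a
 * little-endian load into its big-endian interpretation and vice versa.
 */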
/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u64)bytePtr[1] << 8)
         | ((xxh_u64)bytePtr[2] << 16)
         | ((xxh_u64)bytePtr[3] << 24)
         | ((xxh_u64)bytePtr[4] << 32)
         | ((xxh_u64)bytePtr[5] << 40)
         | ((xxh_u64)bytePtr[6] << 48)
         | ((xxh_u64)bytePtr[7] << 56);
}

XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[7]
         | ((xxh_u64)bytePtr[6] << 8)
         | ((xxh_u64)bytePtr[5] << 16)
         | ((xxh_u64)bytePtr[4] << 24)
         | ((xxh_u64)bytePtr[3] << 32)
         | ((xxh_u64)bytePtr[2] << 40)
         | ((xxh_u64)bytePtr[1] << 48)
         | ((xxh_u64)bytePtr[0] << 56);
}

#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}


/*******   xxh64   *******/
/*!
 * @}
 * @defgroup xxh64_impl XXH64 implementation
 * @ingroup impl
 * @{
 */
/* #define rather than static const, to be used as initializers */
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */

#ifdef XXH_OLD_NAMES
#  define PRIME64_1 XXH_PRIME64_1
#  define PRIME64_2 XXH_PRIME64_2
#  define PRIME64_3 XXH_PRIME64_3
#  define PRIME64_4 XXH_PRIME64_4
#  define PRIME64_5 XXH_PRIME64_5
#endif
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
    return acc;
}

static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
    return acc;
}

static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= XXH_PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= XXH_PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}
#define XXH_get64bits(p) XXH_readLE64_align(p, align)

static xxh_u64
XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    if (ptr==NULL) XXH_ASSERT(len == 0);
    len &= 31;
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    if (len >= 4) {
        h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    while (len > 0) {
        h64 ^= (*ptr++) * XXH_PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
        --len;
    }
    return XXH64_avalanche(h64);
}
#ifdef XXH_OLD_NAMES
#  define PROCESS1_64 XXH_PROCESS1_64
#  define PROCESS4_64 XXH_PROCESS4_64
#  define PROCESS8_64 XXH_PROCESS8_64
#else
#  undef XXH_PROCESS1_64
#  undef XXH_PROCESS4_64
#  undef XXH_PROCESS8_64
#endif

XXH_FORCE_INLINE xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;
    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=32) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
/*******   Hash Streaming   *******/

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH64_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
    state.v[1] = seed + XXH_PRIME64_2;
    state.v[2] = seed + 0;
    state.v[3] = seed - XXH_PRIME64_1;
    /* do not write into reserved64, might be removed in a future version */
    XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
    return XXH_OK;
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {  /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const xxh_u8* const limit = bEnd - 32;

            do {
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
            } while (p<=limit);
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
{
    xxh_u64 h64;

    if (state->total_len >= 32) {
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
        h64 = XXH64_mergeRound(h64, state->v[0]);
        h64 = XXH64_mergeRound(h64, state->v[1]);
        h64 = XXH64_mergeRound(h64, state->v[2]);
        h64 = XXH64_mergeRound(h64, state->v[3]);
    } else {
        h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) state->total_len;

    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}

/*******   Canonical representation   *******/

/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
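/*
 * Usage sketch (illustrative only, not part of the library; assumes an open
 * FILE* f and a local byte buffer buf): feeding XXH64 in chunks through the
 * streaming API. Chunk boundaries do not affect the result: the final digest
 * equals a one-shot XXH64() over the same bytes.
 *
 *     XXH64_state_t* const st = XXH64_createState();
 *     if (st != NULL) {
 *         size_t n;
 *         XXH64_reset(st, 0);   // seed = 0
 *         while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
 *             XXH64_update(st, buf, n);
 *         // XXH64_digest() does not consume the state;
 *         // it can also be called mid-stream for an intermediate hash.
 *         XXH64_hash_t const h = XXH64_digest(st);
 *         XXH64_freeState(st);
 *     }
 */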
#ifndef XXH_NO_XXH3

/* *********************************************************************
 *  XXH3
 *  New generation hash designed for speed on small keys and vectorization
 ************************************************************************ */
/*!
 * @}
 * @defgroup xxh3_impl XXH3 implementation
 * @ingroup impl
 * @{
 */

/* ===   Compiler specifics   === */

#if ((defined(sun) || defined(__sun)) && __cplusplus)  /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
#  define XXH_RESTRICT   /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
#  define XXH_RESTRICT   restrict
#else
/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
#  define XXH_RESTRICT   /* disable */
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
  || defined(__clang__)
#  define XXH_likely(x) __builtin_expect(x, 1)
#  define XXH_unlikely(x) __builtin_expect(x, 0)
#else
#  define XXH_likely(x) (x)
#  define XXH_unlikely(x) (x)
#endif

#if defined(__GNUC__)
#  if defined(__AVX2__)
#    include <immintrin.h>
#  elif defined(__SSE2__)
#    include <emmintrin.h>
#  elif defined(__ARM_NEON__) || defined(__ARM_NEON)
#    define inline __inline__  /* circumvent a clang bug */
#    include <arm_neon.h>
#    undef inline
#  endif
#elif defined(_MSC_VER)
#  include <intrin.h>
#endif
/*
 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
 * remaining a true 64-bit/128-bit hash function.
 *
 * This is done by prioritizing a subset of 64-bit operations that can be
 * emulated without too many steps on the average 32-bit machine.
 *
 * For example, these two lines seem similar, and run equally fast on 64-bit:
 *
 *   xxh_u64 x;
 *   x ^= (x >> 47); // good
 *   x ^= (x >> 13); // bad
 *
 * However, to a 32-bit machine, there is a major difference.
 *
 * x ^= (x >> 47) looks like this:
 *
 *   x.lo ^= (x.hi >> (47 - 32));
 *
 * while x ^= (x >> 13) looks like this:
 *
 *   // note: funnel shifts are not usually cheap.
 *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
 *   x.hi ^= (x.hi >> 13);
 *
 * The first one is significantly faster than the second, simply because the
 * shift is larger than 32. This means:
 *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
 *    32 bits in the shift.
 *  - The shift result will always fit in the lower 32 bits, and therefore,
 *    we can ignore the upper 32 bits in the xor.
 *
 * Thanks to this optimization, XXH3 only requires these features to be efficient:
 *
 *  - Usable unaligned access
 *  - A 32-bit or 64-bit ALU
 *      - If 32-bit, a decent ADC instruction
 *  - A 32- or 64-bit multiply with a 64-bit result
 *  - For the 128-bit variant, a decent byteswap helps short inputs.
 *
 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
 * platforms which can run XXH32 can run XXH3 efficiently.
 *
 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
 * notable exception.
 *
 * First of all, Thumb-1 lacks support for the UMULL instruction which
 * performs the important long multiply. This means numerous __aeabi_lmul
 * calls.
 *
 * Second of all, the 8 functional registers are just not enough.
 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
 * Lo registers, and this shuffling results in thousands more MOVs than A32.
 *
 * A32 and T32 don't have this limitation. They can access all 14 registers,
 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
 * shifts is helpful, too.
 *
 * Therefore, we do a quick sanity check.
 *
 * If compiling Thumb-1 for a target which supports ARM instructions, we will
 * emit a warning, as it is not a "sane" platform to compile for.
 *
 * Usually, if this happens, it is because of an accident and you probably need
 * to specify -march, as you likely meant to compile for a newer architecture.
 *
 * Credit: large sections of the vectorial and asm source code paths
 *         have been contributed by @easyaspi314
 */
#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
#  warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif
/* ==========================================
 * Vectorization detection
 * ========================================== */

#ifdef XXH_DOXYGEN
/*!
 * @ingroup tuning
 * @brief Overrides the vectorization implementation chosen for XXH3.
 *
 * Can be defined to 0 to disable SIMD or any of the values mentioned in
 * @ref XXH_VECTOR_TYPE.
 *
 * If this is not defined, it uses predefined macros to determine the best
 * implementation.
 */
#  define XXH_VECTOR XXH_SCALAR
/*!
 * @ingroup tuning
 * @brief Possible values for @ref XXH_VECTOR.
 *
 * Note that these are actually implemented as macros.
 *
 * If this is not defined, it is detected automatically.
 * @ref XXH_X86DISPATCH overrides this.
 */
enum XXH_VECTOR_TYPE /* fake enum */ {
    XXH_SCALAR = 0,  /*!< Portable scalar version */
    XXH_SSE2   = 1,  /*!<
                      * SSE2 for Pentium 4, Opteron, all x86_64.
                      *
                      * @note SSE2 is also guaranteed on Windows 10, macOS, and
                      * Android x86.
                      */
    XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
    XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
    XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
    XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
};
/*!
 * @ingroup tuning
 * @brief Selects the minimum alignment for XXH3's accumulators.
 *
 * When using SIMD, this should match the alignment required for said vector
 * type, so, for example, 32 for AVX2.
 *
 * Default: Auto detected.
 */
#  define XXH_ACC_ALIGN 8
#endif
/* Actual definition */
#ifndef XXH_DOXYGEN
#  define XXH_SCALAR 0
#  define XXH_SSE2   1
#  define XXH_AVX2   2
#  define XXH_AVX512 3
#  define XXH_NEON   4
#  define XXH_VSX    5
#endif

#ifndef XXH_VECTOR    /* can be defined on command line */
#  if defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif ( \
        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
     || defined(_M_ARM64) || defined(_M_ARM_ARMV7VE) /* msvc */ \
   ) && ( \
        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
   )
#    define XXH_VECTOR XXH_NEON
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__) /* TODO: IBM XL */
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif

/*
 * Controls the alignment of the accumulator,
 * for compatibility with aligned vector loads, which are usually faster.
 */
#ifndef XXH_ACC_ALIGN
#  if defined(XXH_X86DISPATCH)
#    define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
#    define XXH_ACC_ALIGN 8
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
#    define XXH_ACC_ALIGN 32
#  elif XXH_VECTOR == XXH_NEON  /* neon */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
#    define XXH_ACC_ALIGN 64
#  endif
#endif

#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
#  define XXH_SEC_ALIGN 8
#endif

/*
 * UGLY HACK:
 * GCC usually generates the best code with -O3 for xxHash.
 *
 * However, when targeting AVX2, it is overzealous in its unrolling resulting
 * in code roughly 3/4 the speed of Clang.
 *
 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
 *
 * That is why when compiling the AVX2 version, it is recommended to use either
 *   -O2 -mavx2 -march=haswell
 * or
 *   -O2 -mavx2 -mno-avx256-split-unaligned-load
 * for decent performance, or to use Clang instead.
 *
 * Fortunately, we can control the first one with a pragma that forces GCC into
 * -O2, but the other one we can't control without "failed to inline always
 * inline function due to target mismatch" warnings.
 */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC push_options
#  pragma GCC optimize("-O2")
#endif
#if XXH_VECTOR == XXH_NEON
/*
 * NEON's setup for vmlal_u32 is a little more complicated than it is on
 * SSE2, AVX2, and VSX.
 *
 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
 *
 * To do the same operation, the 128-bit 'Q' register needs to be split into
 * two 64-bit 'D' registers, performing this operation::
 *
 *   [                a                 |                 b                ]
 *       |              '---------. .--------'                |
 *       |                         x                          |
 *       |              .---------' '--------.                |
 *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
 *
 * Due to significant changes in aarch64, the fastest method for aarch64 is
 * completely different from the fastest method for ARMv7-A.
 *
 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
 * D11 will modify the high half of Q5. This is similar to how modifying AH
 * will only affect bits 8-15 of AX on x86.
 *
 * VZIP takes two registers, and puts even lanes in one register and odd lanes
 * in the other.
 *
 * On ARMv7-A, this strangely modifies both parameters in place instead of
 * taking the usual 3-operand form.
 *
 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
 * lower and upper halves of the Q register to end up with the high and low
 * halves where we want them, all in one instruction.
 *
 *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
 *
 * Unfortunately, we need inline assembly for this: instructions that modify
 * two registers at once are not expressible in GCC or Clang's IR, so the
 * compilers would have to create a copy.
 *
 * aarch64 requires a different approach.
 *
 * In order to make it easier to write a decent compiler for aarch64, many
 * quirks were removed, such as conditional execution.
 *
 * NEON was also affected by this.
 *
 * aarch64 cannot access the high bits of a Q-form register, and writes to a
 * D-form register zero the high bits, similar to how writes to W-form scalar
 * registers (or DWORD registers on x86_64) work.
 *
 * The formerly free vget_high intrinsics now require a vext (with a few
 * exceptions).
 *
 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
 * operand.
 *
 * The equivalent of the VZIP.32 on the lower and upper halves would be this
 * mess:
 *
 *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
 *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
 *   zip2    v0.2s, v0.2s, v1.2s     // v0 = { v0[1], v2[1] }
 *
 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
 *
 *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
 *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
 *
 * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
 */

/*!
 * Function-like macro:
 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
 * {
 *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
 *     outHi = (uint32x2_t)(in >> 32);
 *     in = UNDEFINED;
 * }
 */
#  if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
   && defined(__GNUC__) \
   && !defined(__aarch64__) && !defined(__arm64__) && !defined(_M_ARM64)
#    define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
        do {                                                                                \
          /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
          /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
          /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
          __asm__("vzip.32  %e0, %f0" : "+w" (in));                                         \
          (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                               \
          (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                               \
        } while (0)
#  else
#    define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
        do {                                                                                \
          (outLo) = vmovn_u64   (in);                                                       \
          (outHi) = vshrn_n_u64 ((in), 32);                                                 \
        } while (0)
#  endif
#endif  /* XXH_VECTOR == XXH_NEON */
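/*
 * Usage sketch (illustrative only): splitting one 64x2 vector into its 32-bit
 * halves ahead of a vmlal_u32 accumulation, under the macro contract above.
 *
 *     uint64x2_t data_key = veorq_u64(data_vec, key_vec);
 *     uint32x2_t data_key_lo, data_key_hi;
 *     XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
 *     // acc_vec = vmlal_u32(acc_vec, data_key_lo, data_key_hi);
 */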
/*
 * VSX and Z Vector helpers.
 *
 * This is very messy, and any pull requests to clean this up are welcome.
 *
 * There are a lot of problems with supporting VSX and s390x, due to
 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
 */
#if XXH_VECTOR == XXH_VSX
#  if defined(__s390x__)
#    include <s390intrin.h>
#  else
/* gcc's altivec.h can have the unwanted consequence of unconditionally
 * #define-ing the bool, vector, and pixel keywords,
 * with bad consequences for programs already using these keywords for other purposes.
 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
 * but it seems that, in some cases, it isn't.
 * Force the build macro to be defined, so that keywords are not altered.
 */
#    if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
#      define __APPLE_ALTIVEC__
#    endif
#    include <altivec.h>
#  endif

typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;

#  ifndef XXH_VSX_BE
#    if defined(__BIG_ENDIAN__) \
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#      define XXH_VSX_BE 1
#    elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#      warning "-maltivec=be is not recommended. Please use native endianness."
#      define XXH_VSX_BE 1
#    else
#      define XXH_VSX_BE 0
#    endif
#  endif /* !defined(XXH_VSX_BE) */

#  if XXH_VSX_BE
#    if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
#      define XXH_vec_revb vec_revb
#    else
/*!
 * A polyfill for POWER9's vec_revb().
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
#    endif
#  endif /* XXH_VSX_BE */

/*!
 * Performs an unaligned vector load and byte swaps it on big endian.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
#  if XXH_VSX_BE
    ret = XXH_vec_revb(ret);
#  endif
    return ret;
}

/*
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
 *
 * These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on
 * the version.
 */
#  if defined(__s390x__)
/* s390x is always big endian, no issue on this platform */
#    define XXH_vec_mulo vec_mulo
#    define XXH_vec_mule vec_mule
#  elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
#    define XXH_vec_mulo __builtin_altivec_vmulouw
#    define XXH_vec_mule __builtin_altivec_vmuleuw
#  else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
#  endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */
/* prefetch
 * can be disabled by declaring the XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  else
#    define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#  endif
#endif  /* XXH_NO_PREFETCH */


/* ==========================================
 * XXH3 default settings
 * ========================================== */

#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif

/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};

#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif
#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 *
 * Implemented as a macro.
 *
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it
 * doesn't need to (but it shouldn't need to anyway: it is about 7 instructions
 * to do a 64x64 multiply...). Since we know that this will _always_ emit
 * `MULL`, we use that instead of the normal method.
 *
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
 * you may also want to write your own long multiply routine here.
 *
 * @param x, y Numbers to be multiplied
 * @return 64-bit product of the low 32 bits of @p x and @p y.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#  include <intrin.h>
#  define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 *
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
 */
#  define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
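/*
 * For instance (illustrative): XXH_mult32to64(0xFFFFFFFF, 0xFFFFFFFF)
 * yields 0xFFFFFFFE00000001ULL, the full 64-bit product of the two 32-bit
 * operands, with no truncation of the high half.
 */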
/*!
 * @brief Calculates a 64->128-bit long multiply.
 *
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
 * version.
 *
 * @param lhs , rhs The 64-bit integers to be multiplied
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * Even on 32-bit platforms, Clang (and emscripten) define this type,
     * despite the platform lacking the arithmetic for it. This results in a
     * laggy compiler builtin call which calculates a full 128-bit multiply.
     * In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if defined(__GNUC__) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to single operand MUL on x64.
     */
#elif defined(_M_X64) || defined(_M_IA64)

#ifndef _MSC_VER
#  pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

    /*
     * MSVC for ARM64's __umulh method.
     *
     * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
     */
#elif defined(_M_ARM64)

#ifndef _MSC_VER
#  pragma intrinsic(__umulh)
#endif
    XXH128_hash_t r128;
    r128.low64  = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *       + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
     *       + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */

    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
/*!
 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
 *
 * The reason for the separate function is to prevent passing too many structs
 * around by value. This will hopefully inline the multiply, but we don't force it.
 *
 * @param lhs , rhs The 64-bit integers to multiply
 * @return The low 64 bits of the product XOR'd by the high 64 bits.
 * @see XXH_mult64to128()
 */
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}
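/*
 * Worked example (illustrative): with lhs = 0xFFFFFFFFFFFFFFFFULL and
 * rhs = 2, the full product is 2^65 - 2, i.e. high64 = 0x0000000000000001
 * and low64 = 0xFFFFFFFFFFFFFFFEULL; XXH3_mul128_fold64() then returns
 * low64 ^ high64 = 0xFFFFFFFFFFFFFFFFULL, so both halves of the 128-bit
 * product contribute to the folded 64-bit result.
 */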
/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}

/*
 * This is a fast avalanche stage,
 * suitable when input bits are already partially mixed
 */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= 0x165667919E3779F9ULL;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/*
 * This is a stronger avalanche,
 * inspired by Pelle Evensen's rrmxmx
 * preferable when input has not been previously mixed
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    /* this mix is inspired by Pelle Evensen's rrmxmx */
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= 0x9FB21C651E98DF25ULL;
    h64 ^= (h64 >> 35) + len;
    h64 *= 0x9FB21C651E98DF25ULL;
    return XXH_xorshift64(h64, 28);
}
/* ==========================================
 * Short keys
 * ==========================================
 * One of the shortcomings of XXH32 and XXH64 was that their performance was
 * sub-optimal on short lengths. They used an iterative algorithm which
 * strongly favored lengths that were a multiple of 4 or 8.
 *
 * Instead of iterating over individual inputs, we use a set of single shot
 * functions which piece together a range of lengths and operate in constant time.
 *
 * Additionally, the number of multiplies has been significantly reduced. This
 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
 *
 * Depending on the platform, this may or may not be faster than XXH32, but it
 * is almost guaranteed to be faster than XXH64.
 */

/*
 * At very short lengths, there isn't enough input to fully hide secrets, or use
 * the entire secret.
 *
 * There is also only a limited amount of mixing we can do before significantly
 * impacting performance.
 *
 * Therefore, we use different sections of the secret and always mix two secret
 * samples with an XOR. This should have no effect on performance on the
 * seedless or withSeed variants because everything _should_ be constant folded
 * by modern compilers.
 *
 * The XOR mixing hides individual parts of the secret and increases entropy.
 *
 * This adds an extra layer of strength for custom secrets.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
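    /* XOR the byteswapped low 32 bits of the seed into its high 32 bits,
     * so that both halves of the 64-bit seed influence the bitflip below. */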
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}
/*
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
 * multiplication by zero, affecting hashes of lengths 17 to 240.
 *
 * However, they are very unlikely.
 *
 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
 * unseeded non-cryptographic hashes, it does not attempt to defend itself
 * against specially crafted inputs, only random inputs.
 *
 * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
 * cancelling out the secret can be taken an arbitrary number of times
 * (addressed in XXH3_accumulate_512), this collision is very unlikely with
 * random inputs and/or proper seeding:
 *
 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
 * function that is only called up to 16 times per hash with up to 240 bytes of
 * input.
 *
 * This is not too bad for a non-cryptographic hash function, especially with
 * only 64-bit outputs.
 *
 * The 128-bit variant (which trades some speed for strength) is NOT affected
 * by this, although it is always a good idea to use a proper seed if you care
 * about strength.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /*
     * UGLY HACK:
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
     * slower code.
     *
     * By forcing seed64 into a register, we disrupt the cost model and
     * cause it to scalarize. See `XXH32_round()`
     *
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
     * GCC 9.2, despite both emitting scalar code.
     *
     * GCC generates much better scalar code than Clang for the rest of XXH3,
     * which is why finding a more optimal codepath is of interest.
     */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}
/* For mid-range keys, XXH3 uses a Mum-hash variant. */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);

        return XXH3_avalanche(acc);
    }
}
#define XXH3_MIDSIZE_MAX 240

XXH_NO_INLINE XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    #define XXH3_MIDSIZE_STARTOFFSET 3
    #define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        int const nbRounds = (int)len / 16;
        int i;
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        acc = XXH3_avalanche(acc);
        XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
         * Everywhere else, it uses scalar code.
         *
         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
         * would still be slower than UMAAL (see XXH_mult64to128).
         *
         * Unfortunately, Clang doesn't handle the long multiplies properly and
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
         * scalarized into an ugly mess of VMOV.32 instructions.
         *
         * This mess is difficult to avoid without turning autovectorization
         * off completely, but the remaining cases are usually relatively minor
         * and/or not worth fixing.
         *
         * This loop is the easiest to fix, as unlike XXH32, this pragma
         * _actually works_ because it is a loop vectorization instead of an
         * SLP vectorization.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        /* last bytes */
        acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        return XXH3_avalanche(acc);
    }
}
/* =======     Long Keys     ======= */

#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))

#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif

XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
    XXH_memcpy(dst, &v64, sizeof(v64));
}

/* Several intrinsic functions below are supposed to accept __int64 as an argument,
 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
 * However, several environments do not define the __int64 type,
 * requiring a workaround.
 */
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
    typedef int64_t xxh_i64;
#else
    /* the following type must have a width of 64 bits */
    typedef long long xxh_i64;
#endif
/*
 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
 *
 * It is a hardened version of UMAC, based off of FARSH's implementation.
 *
 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
 * implementations, and it is ridiculously fast.
 *
 * We harden it by mixing the original input to the accumulators as well as the product.
 *
 * This means that in the (relatively likely) case of a multiply by zero, the
 * original input is preserved.
 *
 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
 * cross-pollination, as otherwise the upper and lower halves would be
 * essentially independent.
 *
 * This doesn't matter on 64-bit hashes since they all get merged together in
 * the end, so we skip the extra step.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */
#if (XXH_VECTOR == XXH_AVX512) \
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)

#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec    = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec     = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key    = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); */
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}
/*
 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
 *
 * Multiplication isn't perfect, as explained by Google in HighwayHash:
 *
 *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
 *  // varying degrees. In descending order of goodness, bytes
 *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
 *  // As expected, the upper and lower bytes are much worse.
 *
 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
 *
 * Since our algorithm uses a pseudorandom secret to add some variance into the
 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
 *
 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
 * extraction.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */
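/*
 * Illustrative note (not part of the upstream implementation): per 64-bit
 * lane, every scramble variant computes, modulo 2^64,
 *
 *     acc[i] = ((acc[i] ^ (acc[i] >> 47)) ^ secret[i]) * XXH_PRIME32_1;
 *
 * i.e. an xorshift, a keyed xor, then a multiply by a 32-bit prime; this is
 * exactly what XXH3_scrambleAcc_scalar spells out further below.
 */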
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);

        /* xacc[0] *= XXH_PRIME32_1; */
        __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));

        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
              __m512i* const dest = (      __m512i*) customSecret;
        int i;
        XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 63) == 0);
        for (i=0; i < nbRounds; ++i) {
            /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
             * this will warn "discards 'const' qualifier". */
            union {
                const __m512i* cp;
                void* p;
            } remote_const_void;
            remote_const_void.cp = src + i;
            dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
    }   }
}

#endif
#if (XXH_VECTOR == XXH_AVX2) \
     || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)

#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xinput  = (const __m256i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xsecret = (const __m256i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* data_vec    = xinput[i]; */
            __m256i const data_vec    = _mm256_loadu_si256   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm256_add_epi64(product, sum);
    }   }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xsecret = (const __m256i *) secret;
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m256i const acc_vec     = xacc[i];
            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
            /* xacc[i] ^= xsecret; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
            /* xacc[i] *= XXH_PRIME32_1; */
            __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
    (void)(&XXH_writeLE64);
    XXH_PREFETCH(customSecret);
    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);

        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
              __m256i*       dest = (      __m256i*) customSecret;

#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
         *   - not extract the secret from SSE registers in the internal loop
         *   - use fewer common registers, and avoid pushing these registers onto the stack
         */
        XXH_COMPILER_GUARD(dest);
#       endif
        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 31) == 0);

        /* GCC -O2 needs the loop unrolled manually */
        dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
        dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
        dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
        dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
        dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
        dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
    }
}

#endif
/* x86dispatch always generates SSE2 */
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)

#ifndef XXH_TARGET_SSE2
# define XXH_TARGET_SSE2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* SSE2 is just a half-scale version of the AVX2 version. */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const __m128i* const xinput  = (const __m128i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const __m128i* const xsecret = (const __m128i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* data_vec    = xinput[i]; */
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm_add_epi64(product, sum);
    }   }
}

XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const __m128i* const xsecret = (const __m128i *) secret;
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m128i const acc_vec     = xacc[i];
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* xacc[i] *= XXH_PRIME32_1; */
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);

#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
        /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
#       else
        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
#       endif
        int i;

        const void* const src16 = XXH3_kSecret;
        __m128i* dst16 = (__m128i*) customSecret;
#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
         *   - not extract the secret from SSE registers in the internal loop
         *   - use fewer common registers, and avoid pushing these registers onto the stack
         */
        XXH_COMPILER_GUARD(dst16);
#       endif
        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dst16 & 15) == 0);

        for (i=0; i < nbRounds; ++i) {
            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
    }   }
}

#endif
#if (XXH_VECTOR == XXH_NEON)

XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {
        uint64x2_t* const xacc = (uint64x2_t *) acc;
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
        uint8_t const* const xinput  = (const uint8_t *) input;
        uint8_t const* const xsecret = (const uint8_t *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
            /* data_vec = xinput[i]; */
            uint8x16_t data_vec = vld1q_u8(xinput  + (i * 16));
            /* key_vec  = xsecret[i]; */
            uint8x16_t key_vec  = vld1q_u8(xsecret + (i * 16));
            uint64x2_t data_key;
            uint32x2_t data_key_lo, data_key_hi;
            /* xacc[i] += swap(data_vec); */
            uint64x2_t const data64  = vreinterpretq_u64_u8(data_vec);
            uint64x2_t const swapped = vextq_u64(data64, data64, 1);
            xacc[i] = vaddq_u64 (xacc[i], swapped);
            /* data_key = data_vec ^ key_vec; */
            data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
            /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (data_key >> 32);
             * data_key = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
            xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
        }
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   uint64x2_t* xacc       = (uint64x2_t*) acc;
        uint8_t const* xsecret = (uint8_t const*) secret;
        uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            uint64x2_t acc_vec  = xacc[i];
            uint64x2_t shifted  = vshrq_n_u64 (acc_vec, 47);
            uint64x2_t data_vec = veorq_u64   (acc_vec, shifted);

            /* xacc[i] ^= xsecret[i]; */
            uint8x16_t key_vec  = vld1q_u8    (xsecret + (i * 16));
            uint64x2_t data_key = veorq_u64   (data_vec, vreinterpretq_u64_u8(key_vec));

            /* xacc[i] *= XXH_PRIME32_1 */
            uint32x2_t data_key_lo, data_key_hi;
            /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
             * xacc[i] = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            {   /*
                 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
                 *
                 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
                 * incorrectly "optimize" this:
                 *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
                 *   shifted = vshll_n_u32(tmp, 32);
                 * to this:
                 *   tmp     = "vmulq_u64"(a, b); // no such thing!
                 *   shifted = vshlq_n_u64(tmp, 32);
                 *
                 * However, unlike SSE, Clang lacks a 64-bit multiply routine
                 * for NEON, and it scalarizes two 64-bit multiplies instead.
                 *
                 * vmull_u32 has the same timing as vmul_u32, and it avoids
                 * this bug completely.
                 * See https://bugs.llvm.org/show_bug.cgi?id=39967
                 */
                uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
                /* xacc[i] = prod_hi << 32; */
                xacc[i] = vshlq_n_u64(prod_hi, 32);
                /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
                xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
            }
    }   }
}

#endif
#if (XXH_VECTOR == XXH_VSX)

XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* presumed aligned */
    unsigned long long* const xacc = (unsigned long long*) acc;
    xxh_u64x2 const* const xinput  = (xxh_u64x2 const*) input;   /* no alignment restriction */
    xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret;  /* no alignment restriction */
    xxh_u64x2 const v32 = { 32, 32 };
    size_t i;
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
        /* data_vec = xinput[i]; */
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
        /* key_vec = xsecret[i]; */
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
        xxh_u64x2 const data_key = data_vec ^ key_vec;
        /* shuffled = (data_key << 32) | (data_key >> 32); */
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
        /* acc_vec = xacc[i]; */
        xxh_u64x2 acc_vec        = vec_xl(0, xacc + 2 * i);
        acc_vec += product;

        /* swap high and low halves */
#ifdef __s390x__
        acc_vec += vec_permi(data_vec, data_vec, 2);
#else
        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
        /* xacc[i] = acc_vec; */
        vec_xst(acc_vec, 0, xacc + 2 * i);
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   xxh_u64x2* const xacc = (xxh_u64x2*) acc;
        const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
        /* constants */
        xxh_u64x2 const v32 = { 32, 32 };
        xxh_u64x2 const v47 = { 47, 47 };
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
        size_t i;
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            xxh_u64x2 const acc_vec  = xacc[i];
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);

            /* xacc[i] ^= xsecret[i]; */
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
            xxh_u64x2 const data_key = data_vec ^ key_vec;

            /* xacc[i] *= XXH_PRIME32_1 */
            /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
            xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
            /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
            xacc[i] = prod_odd + (prod_even << v32);
    }   }
}

#endif
/* scalar variants - universal */

XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    xxh_u64* const xacc = (xxh_u64*) acc;                /* presumed aligned */
    const xxh_u8* const xinput  = (const xxh_u8*) input;  /* no alignment restriction */
    const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
    size_t i;
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
        xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
        xacc[i ^ 1] += data_val; /* swap adjacent lanes */
        xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    xxh_u64* const xacc = (xxh_u64*) acc;                 /* presumed aligned */
    const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
    size_t i;
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
        xxh_u64 acc64 = xacc[i];
        acc64 = XXH_xorshift64(acc64, 47);
        acc64 ^= key64;
        acc64 *= XXH_PRIME32_1;
        xacc[i] = acc64;
    }
}

XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * We need a separate pointer for the hack below,
     * which requires a non-const pointer.
     * Any decent compiler will optimize this out otherwise.
     */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

#if defined(__clang__) && defined(__aarch64__)
    /*
     * UGLY HACK:
     * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
     * placed sequentially, in order, at the top of the unrolled loop.
     *
     * While MOVK is great for generating constants (2 cycles for a 64-bit
     * constant compared to 4 cycles for LDR), long MOVK chains stall the
     * integer pipelines:
     *   I   L   S
     * MOVK
     * MOVK
     * MOVK
     * MOVK
     * ADD
     * SUB      STR
     *          STR
     * By forcing loads from memory (as the asm line causes Clang to assume
     * that XXH3_kSecretPtr has been changed), the pipelines are used more
     * efficiently:
     *   I   L   S
     *      LDR
     *  ADD LDR
     *  SUB     STR
     *          STR
     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
     *   without hack: 2654.4 MB/s
     *   with hack:    3202.9 MB/s
     */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    /*
     * Note: in debug mode, this overrides the asm optimization
     * and Clang will emit MOVK chains again.
     */
    XXH_ASSERT(kSecretPtr == XXH3_kSecret);

    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /*
             * The asm hack causes Clang to assume that kSecretPtr aliases with
             * customSecret, and on aarch64, this prevented LDP from merging two
             * loads together for free. Putting the loads together before the stores
             * properly generates LDP.
             */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}
typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);


#if (XXH_VECTOR == XXH_AVX512)

#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512

#elif (XXH_VECTOR == XXH_AVX2)

#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2

#elif (XXH_VECTOR == XXH_SSE2)

#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2

#elif (XXH_VECTOR == XXH_NEON)

#define XXH3_accumulate_512 XXH3_accumulate_512_neon
#define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_VSX)

#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
#define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#else /* scalar */

#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#endif

#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */
/*
 * XXH3_accumulate()
 * Loops over XXH3_accumulate_512().
 * Assumption: nbStripes will not overflow the secret size
 */
XXH_FORCE_INLINE void
XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
                const xxh_u8* XXH_RESTRICT input,
                const xxh_u8* XXH_RESTRICT secret,
                      size_t nbStripes,
                      XXH3_f_accumulate_512 f_acc512)
{
    size_t n;
    for (n = 0; n < nbStripes; n++ ) {
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);
        f_acc512(acc,
                 in,
                 secret + n*XXH_SECRET_CONSUME_RATE);
    }
}

XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                      const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    size_t const nb_blocks = (len - 1) / block_len;

    size_t n;

    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);

    for (n = 0; n < nb_blocks; n++) {
        XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }

    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);

        /* last stripe */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
            f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
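/*
 * Worked example (illustrative, assuming the default 192-byte secret):
 *   nbStripesPerBlock = (192 - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE
 *                     = (192 - 64) / 8 = 16 stripes
 *   block_len         = 64 * 16 = 1024 bytes
 * so with the default secret the accumulators get scrambled once per KB of
 * input consumed.
 */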
XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
    return XXH3_mul128_fold64(
               acc[0] ^ XXH_readLE64(secret),
               acc[1] ^ XXH_readLE64(secret+8) );
}

static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i = 0;

    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
         * XXH3_64bits, len == 256, Snapdragon 835:
         *   without hack: 2063.7 MB/s
         *   with hack:    2560.7 MB/s
         */
        XXH_COMPILER_GUARD(result64);
#endif
    }

    return XXH3_avalanche(result64);
}
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }

XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate_512 f_acc512,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
 * It's important for performance to transmit the secret's size (when it's static)
 * so that the compiler can properly optimize the vectorized loop.
 * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's preferable for performance that XXH3_hashLong is not inlined,
 * as it results in a smaller function for small data, easier on the instruction cache.
 * Note that inside this no_inline function, we do inline the internal loop,
 * and provide a statically defined secret size to allow optimization of the vector loop.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * XXH3_hashLong_64b_withSeed():
 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
 * and then use this key for long mode hashing.
 *
 * This operation is decently fast but nonetheless costs a little bit of time.
 * Try to avoid it whenever possible (typically when seed==0).
 *
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
 * why (uop cache maybe?), but the difference is large and easily measurable.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate_512 f_acc512,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If any action is to be taken when the `secretLen` condition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that the function signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}
/* ===   Public entry point   === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
{
    return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}

XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
}
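/*
 * Usage sketch (illustrative, not part of the library): the one-shot 64-bit
 * entry points above are self-contained, e.g.
 *
 *     const char msg[] = "hello world";
 *     XXH64_hash_t const h1 = XXH3_64bits(msg, sizeof(msg) - 1);
 *     XXH64_hash_t const h2 = XXH3_64bits_withSeed(msg, sizeof(msg) - 1, (XXH64_hash_t)42);
 *
 * The same bytes hashed with the same seed always produce the same value,
 * so the results are reproducible across runs and platforms.
 */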
/* ===   XXH3 streaming   === */

/*
 * Malloc's a pointer that is always aligned to align.
 *
 * This must be freed with `XXH_alignedFree()`.
 *
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
 * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2,
 * or, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON.
 *
 * This underalignment previously caused a rather obvious crash which went
 * completely unnoticed due to XXH3_createState() not actually being tested.
 * Credit to RedSpah for noticing this bug.
 *
 * The alignment is done manually: Functions like posix_memalign or _mm_malloc
 * are avoided: To maintain portability, we would have to write a fallback
 * like this anyways, and besides, testing for the existence of library
 * functions without relying on external build tools is impossible.
 *
 * The method is simple: Overallocate, manually align, and store the offset
 * to the original behind the returned pointer.
 *
 * Align must be a power of 2 and 8 <= align <= 128.
 */
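/*
 * Worked example (illustrative): with align = 16 and base = 0x1003,
 * offset = 16 - (0x1003 & 15) = 13, so the returned pointer is 0x1010 and
 * the byte at 0x100F stores 13. If base is already aligned, offset = 16,
 * leaving a full 16 bytes before the returned pointer; either way ptr[-1]
 * is valid, which is why the overallocation is s + align.
 */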
static void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             *
             * Even if the returned pointer is aligned, there will always be
             * at least one byte to store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}

/*
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
 */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}

static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}
/* Note : when XXH3_consumeStripes() is invoked,
 * there must be a guarantee that at least one more byte will be consumed from input,
 * so that the function can blindly consume all stripes using the "normal" secret segment */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate_512 f_acc512,
                    XXH3_f_scrambleAcc f_scramble)
{
    XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
        /* need a scrambling operation */
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
        f_scramble(acc, secret + secretLimit);
        XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
        *nbStripesSoFarPtr = nbStripesAfterBlock;
    } else {
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
        *nbStripesSoFarPtr += nbStripes;
    }
}
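/*
 * Worked example (illustrative): with nbStripesPerBlock == 16,
 * *nbStripesSoFarPtr == 14 and nbStripes == 5, the first branch triggers:
 * 2 stripes finish the current block, the accumulators are scrambled, and
 * the remaining 3 stripes restart from the beginning of the secret,
 * leaving *nbStripesSoFarPtr == 3.
 */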
#ifndef XXH3_STREAM_USE_STACK
# ifndef __clang__ /* clang doesn't need additional stack space */
#   define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* For some reason, gcc and MSVC seem to suffer greatly
         * when operating accumulators directly into state.
         * Operating into stack space seems to enable proper optimization.
         * clang, on the other hand, doesn't seem to need this trick */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);

        /* large input to consume : ingest per full block */
        if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
            /* join to current block's end */
            {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
                XXH_ASSERT(nbStripesToEnd <= nbStripes);
                XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                state->nbStripesSoFar = 0;
                input += nbStripesToEnd * XXH_STRIPE_LEN;
                nbStripes -= nbStripesToEnd;
            }
            /* consume per entire blocks */
            while(nbStripes >= state->nbStripesPerBlock) {
                XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
                nbStripes -= state->nbStripesPerBlock;
            }
            /* consume last partial block */
            XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
            input += nbStripes * XXH_STRIPE_LEN;
            XXH_ASSERT(input < bEnd);  /* at least some bytes left */
            state->nbStripesSoFar = nbStripes;
            /* buffer predecessor of last partial stripe */
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
        } else {
            /* content to consume <= block size */
            /* Consume input by a multiple of internal buffer size */
            if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
                const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
                do {
                    XXH3_consumeStripes(acc,
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, XXH3_INTERNALBUFFER_STRIPES,
                                        secret, state->secretLimit,
                                        f_acc512, f_scramble);
                    input += XXH3_INTERNALBUFFER_SIZE;
                } while (input<limit);
                /* buffer predecessor of last partial stripe */
                XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            }
        }

        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        memcpy(state->acc, acc, sizeof(acc));
#endif
    }

    return XXH_OK;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
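/*
 * Usage sketch (illustrative, not part of the library): the streaming API
 * above composes as create / reset / update... / digest / free:
 *
 *     XXH3_state_t* const st = XXH3_createState();
 *     if (st != NULL) {
 *         XXH3_64bits_reset(st);
 *         XXH3_64bits_update(st, buf1, len1);   // buf1/len1: caller-provided
 *         XXH3_64bits_update(st, buf2, len2);   // buf2/len2: caller-provided
 *         {   XXH64_hash_t const h = XXH3_64bits_digest(st); (void)h; }
 *         XXH3_freeState(st);
 *     }
 *
 * Since XXH3_digest_long() works on a local copy of the accumulators,
 * digest() can be called mid-stream and updates can continue afterwards.
 */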
/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that longer hashes are about as fast as the 64-bit version
 * due to it using only a slight modification of the 64-bit loop.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}
  4543. XXH_FORCE_INLINE XXH128_hash_t
  4544. XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
  4545. {
  4546. XXH_ASSERT(input != NULL);
  4547. XXH_ASSERT(secret != NULL);
  4548. XXH_ASSERT(9 <= len && len <= 16);
  4549. { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
  4550. xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
  4551. xxh_u64 const input_lo = XXH_readLE64(input);
  4552. xxh_u64 input_hi = XXH_readLE64(input + len - 8);
  4553. XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
  4554. /*
  4555. * Put len in the middle of m128 to ensure that the length gets mixed to
  4556. * both the low and high bits in the 128x64 multiply below.
  4557. */
  4558. m128.low64 += (xxh_u64)(len - 1) << 54;
  4559. input_hi ^= bitfliph;
  4560. /*
  4561. * Add the high 32 bits of input_hi to the high 32 bits of m128, then
  4562. * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
  4563. * the high 64 bits of m128.
  4564. *
  4565. * The best approach to this operation is different on 32-bit and 64-bit.
  4566. */
  4567. if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
  4568. /*
  4569. * 32-bit optimized version, which is more readable.
  4570. *
  4571. * On 32-bit, it removes an ADC and delays a dependency between the two
  4572. * halves of m128.high64, but it generates an extra mask on 64-bit.
  4573. */
  4574. m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
  4575. } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    b + (a * c)
             * Inverse Property: x + y - x == y
             *    b + (a * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    b + (a * 1) + (a * (c - 1))
             * Identity Property: x * 1 == x
             *    b + a + (a * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64 ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
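
/*
 * Editor's note: an illustrative sanity check for the mask-removal rewrite
 * above (a sketch, not part of the library). Both branches compute the same
 * value, since b + (a * c) == (b + a) + a * (c - 1) == input_hi + a * (c - 1),
 * and 64-bit wraparound affects both expressions identically:
 * @code{.c}
 *   xxh_u64 const masked   = (input_hi & 0xFFFFFFFF00000000ULL)
 *                          + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
 *   xxh_u64 const unmasked = input_hi
 *                          + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
 *   XXH_ASSERT(masked == unmasked);   // holds for any input_hi, modulo 2^64
 * @endcode
 */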
/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64  = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}
/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}
XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        int const nbRounds = (int)len / 32;
        int i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        for (i=0; i<4; i++) {
            acc = XXH128_mix32B(acc,
                                input  + (32 * i),
                                input  + (32 * i) + 16,
                                secret + (32 * i),
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}
/*
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * It's important for performance to pass @secretLen (when it's static,
 * i.e. known at compile time) to the compiler, so that it can properly
 * optimize the vectorized loop.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate_512 f_acc512,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken if `secret` conditions are not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}
/* ===   Public XXH128 API   === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
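
/*
 * Editor's note: an illustrative one-shot usage sketch (not part of the
 * library); the buffer name and seed value are arbitrary:
 * @code{.c}
 *   const char msg[] = "xxhash";
 *   XXH128_hash_t const h = XXH128(msg, sizeof(msg)-1, 0);   // seed = 0
 *   // With seed 0, this equals XXH3_128bits(msg, sizeof(msg)-1).
 *   // h.low64 and h.high64 carry the two halves of the 128-bit result.
 *   (void)h;
 * @endcode
 */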
/* ===   XXH3 128-bit streaming   === */

/*
 * All initialization and update functions are identical to the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
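
/*
 * Editor's note: an illustrative streaming sketch (not part of the library);
 * relies on XXH3_createState()/XXH3_freeState() declared earlier in this
 * file. `buf1`/`buf2` stand for caller-provided data chunks:
 * @code{.c}
 *   XXH3_state_t* const state = XXH3_createState();
 *   if (state != NULL
 *    && XXH3_128bits_reset(state) == XXH_OK
 *    && XXH3_128bits_update(state, buf1, size1) == XXH_OK
 *    && XXH3_128bits_update(state, buf2, size2) == XXH_OK) {
 *       XXH128_hash_t const h128 = XXH3_128bits_digest(state);
 *       (void)h128;   // same result as hashing buf1+buf2 in one shot
 *   }
 *   XXH3_freeState(state);   // safe even if state == NULL
 * @endcode
 */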
/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}
/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2 */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : this bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
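
/*
 * Editor's note: since XXH128_cmp() matches the qsort() comparator signature,
 * it can be passed to qsort() directly to order hashes by high64 first, then
 * low64. Illustrative sketch (not part of the library):
 * @code{.c}
 *   #include <stdlib.h>   // qsort
 *   XXH128_hash_t table[64];
 *   // ... fill table ...
 *   qsort(table, 64, sizeof(table[0]), XXH128_cmp);
 * @endcode
 */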
/*======   Canonical representation   ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
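
/*
 * Editor's note: the canonical form is a fixed big-endian byte sequence,
 * suitable for storage or transmission, and converts back without loss.
 * Illustrative round-trip sketch (not part of the library):
 * @code{.c}
 *   XXH128_hash_t const h128 = XXH128("abc", 3, 0);
 *   XXH128_canonical_t canon;
 *   XXH128_canonicalFromHash(&canon, h128);
 *   {   XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *       XXH_ASSERT(XXH128_isEqual(h128, back));
 *   }
 * @endcode
 */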
/* ==========================================
 * Secret generators
 * ==========================================
 */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

static void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
    XXH_ASSERT(secretBuffer != NULL);
    if (secretBuffer == NULL) return XXH_ERROR;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
    XXH_ASSERT(customSeed != NULL);
    if (customSeed == NULL) return XXH_ERROR;

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
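
/*
 * Editor's note: an illustrative usage sketch (not part of the library),
 * expanding short, possibly low-entropy seed material into a full-size
 * secret, then hashing with it. `seedMaterial` is an arbitrary example:
 * @code{.c}
 *   xxh_u8 secret[XXH3_SECRET_SIZE_MIN];
 *   const char seedMaterial[] = "application-specific key material";
 *   if (XXH3_generateSecret(secret, sizeof(secret),
 *                           seedMaterial, sizeof(seedMaterial)-1) == XXH_OK) {
 *       XXH128_hash_t const h = XXH3_128bits_withSecret("abc", 3,
 *                                                       secret, sizeof(secret));
 *       (void)h;
 *   }
 * @endcode
 */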
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}
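
/*
 * Editor's note: an illustrative sketch (not part of the library). The
 * destination buffer must hold at least XXH_SECRET_DEFAULT_SIZE (192) bytes:
 * @code{.c}
 *   xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
 *   XXH3_generateSecret_fromSeed(secret, 12345);   // arbitrary seed
 *   // secret[] now holds the custom secret that XXH3_initCustomSecret()
 *   // derives internally from seed 12345.
 * @endcode
 */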
/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */

#endif  /* XXH_IMPLEMENTATION */

#if defined (__cplusplus)
}
#endif