/* mini-gmp.c */
  1. /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
  2. Contributed to the GNU project by Niels Möller
  3. Copyright 1991-1997, 1999-2019,2021 Free Software Foundation, Inc.
  4. This file is part of the GNU MP Library.
  5. The GNU MP Library is free software; you can redistribute it and/or modify
  6. it under the terms of either:
  7. * the GNU Lesser General Public License as published by the Free
  8. Software Foundation; either version 3 of the License, or (at your
  9. option) any later version.
  10. or
  11. * the GNU General Public License as published by the Free Software
  12. Foundation; either version 2 of the License, or (at your option) any
  13. later version.
  14. or both in parallel, as here.
  15. The GNU MP Library is distributed in the hope that it will be useful, but
  16. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  17. or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  18. for more details.
  19. You should have received copies of the GNU General Public License and the
  20. GNU Lesser General Public License along with the GNU MP Library. If not,
  21. see https://www.gnu.org/licenses/. */
  22. /* NOTE: All functions in this file which are not declared in
  23. mini-gmp.h are internal, and are not intended to be compatible
  24. with GMP or with future versions of mini-gmp. */
  25. /* Much of the material copied from GMP files, including: gmp-impl.h,
  26. longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
  27. mpn/generic/lshift.c, mpn/generic/mul_1.c,
  28. mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
  29. mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
  30. mpn/generic/submul_1.c. */
  31. #ifdef HAVE_CONFIG_H
  32. # include <config.h>
  33. #endif
  34. #include <assert.h>
  35. #include <ctype.h>
  36. #include <limits.h>
  37. #include <stdint.h>
  38. #include <stdio.h>
  39. #include <stdlib.h>
  40. #include <string.h>
  41. #include "mini-gmp.h"
  42. #if !defined(MINI_GMP_DONT_USE_FLOAT_H)
  43. #include <float.h>
  44. #endif
/* Macros */

/* Width of an mp_limb_t in bits, derived from its byte size.  */
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)
/* All-ones limb, i.e. the largest representable limb value.  */
#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
/* Limb with only the most significant bit set.  */
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))
/* Bit just above the low half-limb; used by the half-limb multiply.  */
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
/* Mask selecting the low half of a limb.  */
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)
/* Width in bits of the word type used for double-limb arithmetic.  */
#define GMP_ULONG_BITS (sizeof(uintptr_t) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((uintptr_t) 1 << (GMP_ULONG_BITS - 1))

/* NOTE: arguments may be evaluated more than once in these macros.  */
#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
/* Negate x into unsigned type T without overflowing on the most
   negative value (adds 1 before the cast, subtracts it after).  */
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))
#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
/* Three-way compare: -1, 0 or 1.  */
#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))

/* Bits in a double's mantissa; fall back to IEEE-754 binary64's 53
   when <float.h> is unavailable or the radix is not 2.  */
#if defined(DBL_MANT_DIG) && FLT_RADIX == 2
#define GMP_DBL_MANT_BITS DBL_MANT_DIG
#else
#define GMP_DBL_MANT_BITS (53)
#endif

/* Return non-zero if xp,xsize and yp,ysize overlap.
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
   overlap.  If both these are false, there's an overlap. */
#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))

/* Evaluate x (an mpn call returning a carry limb) and assert that the
   carry is zero.  */
#define gmp_assert_nocarry(x) do { \
    mp_limb_t __cy = (x); \
    assert (__cy == 0); \
  } while (0)

/* Count leading zero bits of x (x must be non-zero); result in count.
   First skips whole zero bytes, then single bits.  LOCAL_SHIFT_BITS
   avoids an out-of-range shift warning when limbs are 8 bits wide.  */
#define gmp_clz(count, x) do { \
    mp_limb_t __clz_x = (x); \
    unsigned __clz_c = 0; \
    int LOCAL_SHIFT_BITS = 8; \
    if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS) \
      for (; \
	   (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
	   __clz_c += 8) \
	{ __clz_x <<= LOCAL_SHIFT_BITS; } \
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
      __clz_x <<= 1; \
    (count) = __clz_c; \
  } while (0)

/* Count trailing zero bits of x (x must be non-zero); result in count.
   x & -x isolates the lowest set bit, so its clz gives the answer.  */
#define gmp_ctz(count, x) do { \
    mp_limb_t __ctz_x = (x); \
    unsigned __ctz_c = 0; \
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
    (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
  } while (0)
/* Double-limb addition: (sh,sl) = (ah,al) + (bh,bl), carry between
   halves detected via unsigned wrap-around.  */
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)

/* Double-limb subtraction: (sh,sl) = (ah,al) - (bh,bl), with borrow
   propagated from the low half.  */
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - ((al) < (bl)); \
    (sl) = __x; \
  } while (0)

/* Full limb multiply: (w1,w0) = u * v.  Uses a native double-width
   type when one is available, otherwise splits the operands into
   half-limbs and combines the four partial products.  */
#define gmp_umul_ppmm(w1, w0, u, v) \
  do { \
    int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS; \
    if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS) \
      { \
	unsigned int __ww = (unsigned int) (u) * (v); \
	w0 = (mp_limb_t) __ww; \
	w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
      } \
    else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS) \
      { \
	uintptr_t __ww = (uintptr_t) (u) * (v); \
	w0 = (mp_limb_t) __ww; \
	w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
      } \
    else { \
      mp_limb_t __x0, __x1, __x2, __x3; \
      unsigned __ul, __vl, __uh, __vh; \
      mp_limb_t __u = (u), __v = (v); \
      \
      __ul = __u & GMP_LLIMB_MASK; \
      __uh = __u >> (GMP_LIMB_BITS / 2); \
      __vl = __v & GMP_LLIMB_MASK; \
      __vh = __v >> (GMP_LIMB_BITS / 2); \
      \
      __x0 = (mp_limb_t) __ul * __vl; \
      __x1 = (mp_limb_t) __ul * __vh; \
      __x2 = (mp_limb_t) __uh * __vl; \
      __x3 = (mp_limb_t) __uh * __vh; \
      \
      __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
      __x1 += __x2; /* but this indeed can */ \
      if (__x1 < __x2) /* did we get it? */ \
	__x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \
      \
      (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
      (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
    } \
  } while (0)
/* Divide the two-limb number (nh,nl) by the normalized divisor d,
   using the precomputed inverse di (see mpn_invert_limb).  Produces
   quotient q and remainder r.  The candidate quotient may be one too
   small; _mask and the final comparison correct it branch-lightly.  */
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
  do { \
    mp_limb_t _qh, _ql, _r, _mask; \
    gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
    _r = (nl) - _qh * (d); \
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
    _qh += _mask; \
    _r += _mask & (d); \
    if (_r >= (d)) \
      { \
	_r -= (d); \
	_qh++; \
      } \
    \
    (r) = _r; \
    (q) = _qh; \
  } while (0)

/* Divide the three-limb number (n2,n1,n0) by the normalized two-limb
   divisor (d1,d0), using the precomputed inverse dinv.  Produces the
   quotient q and the two-limb remainder (r1,r0).  */
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
  do { \
    mp_limb_t _q0, _t1, _t0, _mask; \
    gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
    \
    /* Compute the two most significant limbs of n - q'd */ \
    (r1) = (n1) - (d1) * (q); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
    gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
    (q)++; \
    \
    /* Conditionally adjust q and the remainders */ \
    _mask = - (mp_limb_t) ((r1) >= _q0); \
    (q) += _mask; \
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
    if ((r1) >= (d1)) \
      { \
	if ((r1) > (d1) || (r0) >= (d0)) \
	  { \
	    (q)++; \
	    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
	  } \
      } \
  } while (0)
/* Swap macros. */

/* Each macro exchanges two lvalues of the named type via a temporary;
   arguments must be distinct objects.  */
#define MP_LIMB_T_SWAP(x, y) \
  do { \
    mp_limb_t __mp_limb_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_limb_t_swap__tmp; \
  } while (0)
#define MP_SIZE_T_SWAP(x, y) \
  do { \
    mp_size_t __mp_size_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_size_t_swap__tmp; \
  } while (0)
#define MP_BITCNT_T_SWAP(x,y) \
  do { \
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_bitcnt_t_swap__tmp; \
  } while (0)
#define MP_PTR_SWAP(x, y) \
  do { \
    mp_ptr __mp_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_ptr_swap__tmp; \
  } while (0)
#define MP_SRCPTR_SWAP(x, y) \
  do { \
    mp_srcptr __mp_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_srcptr_swap__tmp; \
  } while (0)
/* Swap a (pointer, size) pair describing an mpn operand.  */
#define MPN_PTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_PTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_SRCPTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)
#define MPZ_PTR_SWAP(x, y) \
  do { \
    mpz_ptr __mpz_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_ptr_swap__tmp; \
  } while (0)
#define MPZ_SRCPTR_SWAP(x, y) \
  do { \
    mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_srcptr_swap__tmp; \
  } while (0)

/* Public constant: number of bits per limb for this build.  */
const int mp_bits_per_limb = GMP_LIMB_BITS;
  242. /* Memory allocation and other helper functions. */
/* Print MSG to stderr and terminate the process.  Used for
   unrecoverable conditions such as memory exhaustion; never returns.  */
static void
gmp_die (const char *msg)
{
  fprintf (stderr, "%s\n", msg);
  abort();
}
  249. static void *
  250. gmp_default_alloc (size_t size)
  251. {
  252. void *p;
  253. assert (size > 0);
  254. p = malloc (size);
  255. if (!p)
  256. gmp_die("gmp_default_alloc: Virtual memory exhausted.");
  257. return p;
  258. }
  259. static void *
  260. gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size)
  261. {
  262. void * p;
  263. p = realloc (old, new_size);
  264. if (!p)
  265. gmp_die("gmp_default_realloc: Virtual memory exhausted.");
  266. return p;
  267. }
  268. static void
  269. gmp_default_free (void *p, size_t unused_size)
  270. {
  271. free (p);
  272. }
/* Active memory hooks; replaceable via mp_set_memory_functions.  */
static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
  276. void
  277. mp_get_memory_functions (void *(**alloc_func) (size_t),
  278. void *(**realloc_func) (void *, size_t, size_t),
  279. void (**free_func) (void *, size_t))
  280. {
  281. if (alloc_func)
  282. *alloc_func = gmp_allocate_func;
  283. if (realloc_func)
  284. *realloc_func = gmp_reallocate_func;
  285. if (free_func)
  286. *free_func = gmp_free_func;
  287. }
  288. void
  289. mp_set_memory_functions (void *(*alloc_func) (size_t),
  290. void *(*realloc_func) (void *, size_t, size_t),
  291. void (*free_func) (void *, size_t))
  292. {
  293. if (!alloc_func)
  294. alloc_func = gmp_default_alloc;
  295. if (!realloc_func)
  296. realloc_func = gmp_default_realloc;
  297. if (!free_func)
  298. free_func = gmp_default_free;
  299. gmp_allocate_func = alloc_func;
  300. gmp_reallocate_func = realloc_func;
  301. gmp_free_func = free_func;
  302. }
/* Allocate/free through the current hooks.  gmp_free passes 0 for the
   size since mini-gmp does not track block sizes.  */
#define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
#define gmp_free(p) ((*gmp_free_func) ((p), 0))
  305. static mp_ptr
  306. gmp_xalloc_limbs (mp_size_t size)
  307. {
  308. return (mp_ptr) gmp_xalloc (size * sizeof (mp_limb_t));
  309. }
  310. static mp_ptr
  311. gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
  312. {
  313. assert (size > 0);
  314. return (mp_ptr) (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
  315. }
  316. /* MPN interface */
  317. void
  318. mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
  319. {
  320. mp_size_t i;
  321. for (i = 0; i < n; i++)
  322. d[i] = s[i];
  323. }
  324. void
  325. mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
  326. {
  327. while (--n >= 0)
  328. d[n] = s[n];
  329. }
  330. int
  331. mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
  332. {
  333. while (--n >= 0)
  334. {
  335. if (ap[n] != bp[n])
  336. return ap[n] > bp[n] ? 1 : -1;
  337. }
  338. return 0;
  339. }
  340. static int
  341. mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
  342. {
  343. if (an != bn)
  344. return an < bn ? -1 : 1;
  345. else
  346. return mpn_cmp (ap, bp, an);
  347. }
  348. static mp_size_t
  349. mpn_normalized_size (mp_srcptr xp, mp_size_t n)
  350. {
  351. while (n > 0 && xp[n-1] == 0)
  352. --n;
  353. return n;
  354. }
  355. int
  356. mpn_zero_p(mp_srcptr rp, mp_size_t n)
  357. {
  358. return mpn_normalized_size (rp, n) == 0;
  359. }
  360. void
  361. mpn_zero (mp_ptr rp, mp_size_t n)
  362. {
  363. while (--n >= 0)
  364. rp[n] = 0;
  365. }
  366. mp_limb_t
  367. mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
  368. {
  369. mp_size_t i;
  370. assert (n > 0);
  371. i = 0;
  372. do
  373. {
  374. mp_limb_t r = ap[i] + b;
  375. /* Carry out */
  376. b = (r < b);
  377. rp[i] = r;
  378. }
  379. while (++i < n);
  380. return b;
  381. }
  382. mp_limb_t
  383. mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
  384. {
  385. mp_size_t i;
  386. mp_limb_t cy;
  387. for (i = 0, cy = 0; i < n; i++)
  388. {
  389. mp_limb_t a, b, r;
  390. a = ap[i]; b = bp[i];
  391. r = a + cy;
  392. cy = (r < cy);
  393. r += b;
  394. cy += (r < b);
  395. rp[i] = r;
  396. }
  397. return cy;
  398. }
  399. mp_limb_t
  400. mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
  401. {
  402. mp_limb_t cy;
  403. assert (an >= bn);
  404. cy = mpn_add_n (rp, ap, bp, bn);
  405. if (an > bn)
  406. cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
  407. return cy;
  408. }
  409. mp_limb_t
  410. mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
  411. {
  412. mp_size_t i;
  413. assert (n > 0);
  414. i = 0;
  415. do
  416. {
  417. mp_limb_t a = ap[i];
  418. /* Carry out */
  419. mp_limb_t cy = a < b;
  420. rp[i] = a - b;
  421. b = cy;
  422. }
  423. while (++i < n);
  424. return b;
  425. }
/* Subtract {bp,n} from {ap,n} limb-wise into {rp,n}; return the
   borrow out of the most significant limb (0 or 1).  */
mp_limb_t
mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  mp_size_t i;
  mp_limb_t cy;
  for (i = 0, cy = 0; i < n; i++)
    {
      mp_limb_t a, b;
      a = ap[i]; b = bp[i];
      /* Fold the incoming borrow into b; if b wraps, the borrow
	 propagates regardless of a.  */
      b += cy;
      cy = (b < cy);
      /* Borrow out if the (augmented) subtrahend exceeds a.  */
      cy += (a < b);
      rp[i] = a - b;
    }
  return cy;
}
  442. mp_limb_t
  443. mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
  444. {
  445. mp_limb_t cy;
  446. assert (an >= bn);
  447. cy = mpn_sub_n (rp, ap, bp, bn);
  448. if (an > bn)
  449. cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
  450. return cy;
  451. }
/* Multiply {up,n} by the single limb VL, storing the low n limbs of
   the product in {rp,n}; return the high limb.  N must be >= 1.  */
mp_limb_t
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl;
  assert (n >= 1);
  cl = 0;
  do
    {
      ul = *up++;
      /* (hpl,lpl) = ul * vl, a full double-limb product.  */
      gmp_umul_ppmm (hpl, lpl, ul, vl);
      /* Add the running carry into the low product limb; a wrap plus
	 the high product limb forms the next carry.  Cannot overflow:
	 hpl <= GMP_LIMB_MAX - 1 whenever the wrap occurs.  */
      lpl += cl;
      cl = (lpl < cl) + hpl;
      *rp++ = lpl;
    }
  while (--n != 0);
  return cl;
}
/* rp[0..n-1] += up[0..n-1] * vl.  Returns the most significant limb of
   the result (the carry out of the top position).  n must be >= 1.  */
mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      /* Fold the running carry into the low product limb; the new carry
	 is the wrap from that add plus the high product limb.  */
      lpl += cl;
      cl = (lpl < cl) + hpl;

      /* Accumulate into the destination limb, adding any wrap to the
	 carry.  */
      rl = *rp;
      lpl = rl + lpl;
      cl += lpl < rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}
/* rp[0..n-1] -= up[0..n-1] * vl.  Returns the borrow out of the top
   position (the limb that would be subtracted from rp[n]).  n >= 1.  */
mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      /* Fold the running borrow into the low product limb.  */
      lpl += cl;
      cl = (lpl < cl) + hpl;

      /* Subtract from the destination limb; underflow adds to the
	 borrow.  */
      rl = *rp;
      lpl = rl - lpl;
      cl += lpl > rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}
/* Schoolbook multiplication: {rp, un+vn} = {up,un} * {vp,vn}, with
   un >= vn >= 1, and rp not overlapping either input.  Returns the most
   significant limb of the product (which may be zero).  */
mp_limb_t
mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
{
  assert (un >= vn);
  assert (vn >= 1);
  assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un));
  assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn));

  /* We first multiply by the low order limb. This result can be
     stored, not added, to rp. We also avoid a loop for zeroing this
     way. */
  rp[un] = mpn_mul_1 (rp, up, un, vp[0]);

  /* Now accumulate the product of up[] and the next higher limb from
     vp[]. */
  while (--vn >= 1)
    {
      rp += 1, vp += 1;
      rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
    }
  return rp[un];
}
  529. void
  530. mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
  531. {
  532. mpn_mul (rp, ap, n, bp, n);
  533. }
  534. void
  535. mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
  536. {
  537. mpn_mul (rp, ap, n, ap, n);
  538. }
/* Shift {up,n} left by cnt bits (0 < cnt < GMP_LIMB_BITS), storing n
   limbs at rp and returning the bits shifted out of the top limb.
   Works downward from the high end, so rp may overlap up when shifting
   towards higher addresses (rp >= up).  */
mp_limb_t
mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
  mp_limb_t high_limb, low_limb;
  unsigned int tnc;
  mp_limb_t retval;

  assert (n >= 1);
  assert (cnt >= 1);
  assert (cnt < GMP_LIMB_BITS);

  /* Process from the most significant limb downwards.  */
  up += n;
  rp += n;

  tnc = GMP_LIMB_BITS - cnt;	/* complementary shift count */
  low_limb = *--up;
  retval = low_limb >> tnc;	/* bits pushed out at the top */
  high_limb = (low_limb << cnt);

  while (--n != 0)
    {
      low_limb = *--up;
      /* Each result limb combines the saved high part of one source
	 limb with the top bits of the next lower one.  */
      *--rp = high_limb | (low_limb >> tnc);
      high_limb = (low_limb << cnt);
    }
  *--rp = high_limb;		/* lowest limb gets zeros shifted in */

  return retval;
}
/* Shift {up,n} right by cnt bits (0 < cnt < GMP_LIMB_BITS), storing n
   limbs at rp and returning the bits shifted out at the bottom, placed
   in the high end of a limb.  Works upward from the low end, so rp may
   overlap up when shifting towards lower addresses (rp <= up).  */
mp_limb_t
mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
  mp_limb_t high_limb, low_limb;
  unsigned int tnc;
  mp_limb_t retval;

  assert (n >= 1);
  assert (cnt >= 1);
  assert (cnt < GMP_LIMB_BITS);

  tnc = GMP_LIMB_BITS - cnt;	/* complementary shift count */
  high_limb = *up++;
  retval = (high_limb << tnc);	/* bits dropped off the bottom */
  low_limb = high_limb >> cnt;

  while (--n != 0)
    {
      high_limb = *up++;
      /* Combine the low bits of the next limb with the saved part of
	 the previous one.  */
      *rp++ = low_limb | (high_limb << tnc);
      low_limb = high_limb >> cnt;
    }
  *rp = low_limb;		/* top limb gets zeros shifted in */

  return retval;
}
/* Common helper for mpn_scan0/mpn_scan1.  Scans {up,un} starting from
   limb i (with `limb` being that limb, already masked/complemented by
   the caller) for the first set bit of limb ^ ux.  ux is 0 when
   scanning for a 1 bit and GMP_LIMB_MAX when scanning for a 0 bit.
   If the scan runs off the end: for scan1 (ux == 0) returns ~0, for
   scan0 returns the first bit position past the number, where all
   (virtual) bits are zero.  */
static mp_bitcnt_t
mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
		 mp_limb_t ux)
{
  unsigned cnt;

  assert (ux == 0 || ux == GMP_LIMB_MAX);
  assert (0 <= i && i <= un );

  while (limb == 0)
    {
      i++;
      if (i == un)
	return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
      limb = ux ^ up[i];
    }
  /* Found a limb with the sought bit; locate its lowest set bit.  */
  gmp_ctz (cnt, limb);
  return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
}
/* Return the index of the first 1 bit at or above position `bit` in
   {ptr, ...}.  NOTE(review): un is passed as i, so mpn_common_scan's
   end-of-array check triggers only if the starting limb itself is
   nonzero or the caller guarantees a set bit exists at or beyond
   `bit`; presumably callers ensure this -- verify against callers.  */
mp_bitcnt_t
mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
{
  mp_size_t i;
  i = bit / GMP_LIMB_BITS;

  /* Mask off bits below `bit` in the starting limb.  */
  return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
			  i, ptr, i, 0);
}
/* Return the index of the first 0 bit at or above position `bit`.
   The starting limb is complemented so the helper can scan for a set
   bit; ux = GMP_LIMB_MAX complements subsequent limbs the same way.
   NOTE(review): as in mpn_scan1, un is passed as i -- see the caveat
   there.  */
mp_bitcnt_t
mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
{
  mp_size_t i;
  i = bit / GMP_LIMB_BITS;

  return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
			 i, ptr, i, GMP_LIMB_MAX);
}
  618. void
  619. mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n)
  620. {
  621. while (--n >= 0)
  622. *rp++ = ~ *up++;
  623. }
/* Store the two's complement negation of {up,n} at rp.  Returns 0 if
   the input was all zeros (result is zero too), otherwise 1.
   Negation is: keep low zero limbs, negate the first nonzero limb,
   then one's-complement everything above it.  */
mp_limb_t
mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n)
{
  /* Copy low zero limbs unchanged; -0 == 0 with no borrow yet.  */
  while (*up == 0)
    {
      *rp = 0;
      if (!--n)
	return 0;	/* entire operand was zero */
      ++up; ++rp;
    }
  /* First nonzero limb absorbs the borrow...  */
  *rp = - *up;
  /* ...and all higher limbs are simply complemented.  */
  mpn_com (++rp, ++up, --n);
  return 1;
}
  638. /* MPN division interface. */
/* The 3/2 inverse is defined as

     m = floor( (B^3-1) / (B u1 + u0)) - B

   where B is the limb base.  Requires the divisor to be normalized,
   i.e. the high bit of u1 set (relied on by the "2 u >= B" bound in
   the adjustment reasoning below).  */
mp_limb_t
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
{
  mp_limb_t r, m;

  /* First compute the 2/1 inverse of u1 using half-limb arithmetic.  */
  {
    mp_limb_t p, ql;
    unsigned ul, uh, qh;

    /* For notation, let b denote the half-limb base, so that B = b^2.
       Split u1 = b uh + ul. */
    ul = u1 & GMP_LLIMB_MASK;
    uh = u1 >> (GMP_LIMB_BITS / 2);

    /* Approximation of the high half of quotient. Differs from the 2/1
       inverse of the half limb uh, since we have already subtracted
       u0. */
    qh = (u1 ^ GMP_LIMB_MAX) / uh;

    /* Adjust to get a half-limb 3/2 inverse, i.e., we want

       qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u
	   = floor( (b (~u) + b-1) / u),

       and the remainder

       r = b (~u) + b-1 - qh (b uh + ul)
	 = b (~u - qh uh) + b-1 - qh ul

       Subtraction of qh ul may underflow, which implies adjustments.
       But by normalization, 2 u >= B > qh ul, so we need to adjust by
       at most 2.
    */
    r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;

    p = (mp_limb_t) qh * ul;
    /* Adjustment steps taken from udiv_qrnnd_c */
    if (r < p)
      {
	qh--;
	r += u1;
	if (r >= u1) /* i.e. we didn't get carry when adding to r */
	  if (r < p)
	    {
	      qh--;
	      r += u1;
	    }
      }
    r -= p;

    /* Low half of the quotient is

       ql = floor ( (b r + b-1) / u1).

       This is a 3/2 division (on half-limbs), for which qh is a
       suitable inverse. */
    p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
    /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
       work, it is essential that ql is a full mp_limb_t. */
    ql = (p >> (GMP_LIMB_BITS / 2)) + 1;

    /* By the 3/2 trick, we don't need the high half limb. */
    r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;

    if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2))))
      {
	ql--;
	r += u1;
      }
    m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
    if (r >= u1)
      {
	m++;
	r -= u1;
      }
  }

  /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
     3/2 inverse. */
  if (u0 > 0)
    {
      mp_limb_t th, tl;
      /* Fold u0 into the remainder; each wraparound means the inverse
	 estimate was one too large.  */
      r = ~r;
      r += u0;
      if (r < u0)
	{
	  m--;
	  if (r >= u1)
	    {
	      m--;
	      r -= u1;
	    }
	  r -= u1;
	}
      /* Account for the cross product u0 * m.  */
      gmp_umul_ppmm (th, tl, u0, m);
      r += th;
      if (r < th)
	{
	  m--;
	  m -= ((r > u1) | ((r == u1) & (tl > u0)));
	}
    }

  return m;
}
/* Precomputed data for division by an invariant divisor, shared by the
   1-, 2- and n-limb division routines.  */
struct gmp_div_inverse
{
  /* Normalization shift count. */
  unsigned shift;
  /* Normalized divisor (d0 unused for mpn_div_qr_1) */
  mp_limb_t d1, d0;
  /* Inverse, for 2/1 or 3/2. */
  mp_limb_t di;
};
  740. static void
  741. mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
  742. {
  743. unsigned shift;
  744. assert (d > 0);
  745. gmp_clz (shift, d);
  746. inv->shift = shift;
  747. inv->d1 = d << shift;
  748. inv->di = mpn_invert_limb (inv->d1);
  749. }
/* Fill in *inv for division by the two-limb divisor <d1, d0> (d1 > 0):
   shift both limbs left until d1's high bit is set, then compute the
   3/2 inverse of the normalized pair.  */
static void
mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
		     mp_limb_t d1, mp_limb_t d0)
{
  unsigned shift;

  assert (d1 > 0);
  gmp_clz (shift, d1);
  inv->shift = shift;
  if (shift > 0)
    {
      /* Bits shifted out of d0 move into the low end of d1.  */
      d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
      d0 <<= shift;
    }
  inv->d1 = d1;
  inv->d0 = d0;
  inv->di = mpn_invert_3by2 (d1, d0);
}
/* Fill in *inv for division by an arbitrary divisor {dp,dn}, dn > 0.
   Dispatches to the 1- and 2-limb cases; for larger divisors it
   normalizes only the top two limbs (pulling in bits from dp[dn-3])
   and computes the 3/2 inverse used by mpn_div_qr_pi1.  */
static void
mpn_div_qr_invert (struct gmp_div_inverse *inv,
		   mp_srcptr dp, mp_size_t dn)
{
  assert (dn > 0);

  if (dn == 1)
    mpn_div_qr_1_invert (inv, dp[0]);
  else if (dn == 2)
    mpn_div_qr_2_invert (inv, dp[1], dp[0]);
  else
    {
      unsigned shift;
      mp_limb_t d1, d0;

      d1 = dp[dn-1];
      d0 = dp[dn-2];
      assert (d1 > 0);
      gmp_clz (shift, d1);
      inv->shift = shift;
      if (shift > 0)
	{
	  /* Normalize the top two limbs, taking low bits from the
	     limbs below them.  */
	  d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
	  d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
	}
      inv->d1 = d1;
      inv->d0 = d0;
      inv->di = mpn_invert_3by2 (d1, d0);
    }
}
/* Not matching current public gmp interface, rather corresponding to
   the sbpi1_div_* functions. */

/* Divide {np,nn} by the single-limb divisor described by *inv.
   If qp is non-NULL, the nn quotient limbs are stored there (qp may
   equal np).  Returns the remainder.  */
static mp_limb_t
mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
		     const struct gmp_div_inverse *inv)
{
  mp_limb_t d, di;
  mp_limb_t r;
  mp_ptr tp = NULL;

  if (inv->shift > 0)
    {
      /* Shift, reusing qp area if possible. In-place shift if qp == np. */
      tp = qp ? qp : gmp_xalloc_limbs (nn);
      r = mpn_lshift (tp, np, nn, inv->shift);
      np = tp;
    }
  else
    r = 0;

  d = inv->d1;
  di = inv->di;
  /* Generate quotient limbs from the top down, carrying the remainder
     in r.  */
  while (--nn >= 0)
    {
      mp_limb_t q;

      gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
      if (qp)
	qp[nn] = q;
    }
  /* Free the scratch only if it was allocated here (not qp).  */
  if ((inv->shift > 0) && (tp != qp))
    gmp_free (tp);

  /* Undo the normalization shift on the remainder.  */
  return r >> inv->shift;
}
/* Divide {np,nn} (nn >= 2) by the two-limb divisor described by *inv.
   Quotient limbs (nn-2+1 of them) go to qp if non-NULL; the two-limb
   remainder is left in np[0], np[1].  np is clobbered (shifted in
   place when normalization is needed).  */
static void
mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
		     const struct gmp_div_inverse *inv)
{
  unsigned shift;
  mp_size_t i;
  mp_limb_t d1, d0, di, r1, r0;

  assert (nn >= 2);
  shift = inv->shift;
  d1 = inv->d1;
  d0 = inv->d0;
  di = inv->di;

  /* Normalize the dividend in place; the shifted-out bits seed the
     high remainder limb.  */
  if (shift > 0)
    r1 = mpn_lshift (np, np, nn, shift);
  else
    r1 = 0;

  r0 = np[nn - 1];

  /* 3/2 division step per dividend limb, from the top down.  */
  i = nn - 2;
  do
    {
      mp_limb_t n0, q;
      n0 = np[i];
      gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);

      if (qp)
	qp[i] = q;
    }
  while (--i >= 0);

  /* Undo the normalization shift on the remainder pair.  */
  if (shift > 0)
    {
      assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0);
      r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
      r1 >>= shift;
    }

  np[1] = r1;
  np[0] = r0;
}
/* Schoolbook division of {np,nn} (with top limb n1 already extracted)
   by the normalized divisor {dp,dn}, dn > 2, using the precomputed 3/2
   inverse dinv of the divisor's top two limbs.  Quotient limbs
   (nn-dn+1 of them) go to qp if non-NULL; the dn-limb remainder is
   left in np (still shifted -- the caller undoes normalization).  */
static void
mpn_div_qr_pi1 (mp_ptr qp,
		mp_ptr np, mp_size_t nn, mp_limb_t n1,
		mp_srcptr dp, mp_size_t dn,
		mp_limb_t dinv)
{
  mp_size_t i;

  mp_limb_t d1, d0;
  mp_limb_t cy, cy1;
  mp_limb_t q;

  assert (dn > 2);
  assert (nn >= dn);

  d1 = dp[dn - 1];
  d0 = dp[dn - 2];

  assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
  /* Iteration variable is the index of the q limb.
   *
   * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
   * by            <d1,          d0,        dp[dn-3],  ..., dp[0] >
   */

  i = nn - dn;
  do
    {
      mp_limb_t n0 = np[dn-1+i];

      if (n1 == d1 && n0 == d0)
	{
	  /* Quotient estimate would overflow; q is exactly B-1 here.  */
	  q = GMP_LIMB_MAX;
	  mpn_submul_1 (np+i, dp, dn, q);
	  n1 = np[dn-1+i];	/* update n1, last loop's value will now be invalid */
	}
      else
	{
	  /* 3/2 step gives q and the new top two remainder limbs.  */
	  gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);

	  /* Subtract q times the low dn-2 divisor limbs, propagating
	     the borrow into <n1, n0>.  */
	  cy = mpn_submul_1 (np + i, dp, dn-2, q);

	  cy1 = n0 < cy;
	  n0 = n0 - cy;
	  cy = n1 < cy1;
	  n1 = n1 - cy1;
	  np[dn-2+i] = n0;

	  if (cy != 0)
	    {
	      /* q was one too large; add the divisor back.  */
	      n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
	      q--;
	    }
	}

      if (qp)
	qp[i] = q;
    }
  while (--i >= 0);

  np[dn - 1] = n1;
}
/* Divide {np,nn} by {dp,dn} using the precomputed inverse *inv.
   Quotient limbs go to qp if non-NULL; the remainder replaces the low
   dn limbs of np.  For dn > 2, dp must already be normalized to match
   inv (see the asserts).  */
static void
mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
		   mp_srcptr dp, mp_size_t dn,
		   const struct gmp_div_inverse *inv)
{
  assert (dn > 0);
  assert (nn >= dn);

  if (dn == 1)
    np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
  else if (dn == 2)
    mpn_div_qr_2_preinv (qp, np, nn, inv);
  else
    {
      mp_limb_t nh;
      unsigned shift;

      assert (inv->d1 == dp[dn-1]);
      assert (inv->d0 == dp[dn-2]);
      assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);

      shift = inv->shift;
      /* Normalize the dividend in place; nh is the shifted-out top.  */
      if (shift > 0)
	nh = mpn_lshift (np, np, nn, shift);
      else
	nh = 0;

      mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);

      /* Denormalize the remainder; no bits may fall off the bottom.  */
      if (shift > 0)
	gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
    }
}
/* Divide {np,nn} by {dp,dn}: quotient to qp (if non-NULL), remainder
   into the low dn limbs of np.  Computes the divisor inverse itself;
   for dn > 2 a normalized copy of the divisor is made in scratch
   space, since mpn_div_qr_preinv needs dp shifted to match.  */
static void
mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
{
  struct gmp_div_inverse inv;
  mp_ptr tp = NULL;

  assert (dn > 0);
  assert (nn >= dn);

  mpn_div_qr_invert (&inv, dp, dn);
  if (dn > 2 && inv.shift > 0)
    {
      /* Shifting out of the top limb would change the value; assert
	 none is lost.  */
      tp = gmp_xalloc_limbs (dn);
      gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
      dp = tp;
    }
  mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
  if (tp)
    gmp_free (tp);
}
  959. /* MPN base conversion. */
  960. static unsigned
  961. mpn_base_power_of_two_p (unsigned b)
  962. {
  963. switch (b)
  964. {
  965. case 2: return 1;
  966. case 4: return 2;
  967. case 8: return 3;
  968. case 16: return 4;
  969. case 32: return 5;
  970. case 64: return 6;
  971. case 128: return 7;
  972. case 256: return 8;
  973. default: return 0;
  974. }
  975. }
/* Precomputed data about a conversion base, used by the non-power-of-
   two string conversion routines.  */
struct mpn_base_info
{
  /* bb is the largest power of the base which fits in one limb, and
     exp is the corresponding exponent. */
  unsigned exp;
  mp_limb_t bb;
};
  983. static void
  984. mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
  985. {
  986. mp_limb_t m;
  987. mp_limb_t p;
  988. unsigned exp;
  989. m = GMP_LIMB_MAX / b;
  990. for (exp = 1, p = b; p <= m; exp++)
  991. p *= b;
  992. info->exp = exp;
  993. info->bb = p;
  994. }
  995. static mp_bitcnt_t
  996. mpn_limb_size_in_base_2 (mp_limb_t u)
  997. {
  998. unsigned shift;
  999. assert (u > 0);
  1000. gmp_clz (shift, u);
  1001. return GMP_LIMB_BITS - shift;
  1002. }
/* Convert {up,un} to digits in a power-of-two base 2^bits, most
   significant digit first, storing them at sp.  Returns the number of
   digits produced (no leading zero digits).  */
static size_t
mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
{
  unsigned char mask;
  size_t sn, j;
  mp_size_t i;
  unsigned shift;

  /* Total significant bits, rounded up to whole digits.  */
  sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
	+ bits - 1) / bits;
  mask = (1U << bits) - 1;

  /* Walk the limbs from least significant, emitting digits from the
     end of sp backwards; a digit may straddle two limbs.  */
  for (i = 0, j = sn, shift = 0; j-- > 0;)
    {
      unsigned char digit = up[i] >> shift;

      shift += bits;

      if (shift >= GMP_LIMB_BITS && ++i < un)
	{
	  shift -= GMP_LIMB_BITS;
	  /* Pull in the straddling digit's high bits from the next
	     limb.  */
	  digit |= up[i] << (bits - shift);
	}
      sp[j] = digit & mask;
    }
  return sn;
}
/* We generate digits from the least significant end, and reverse at
   the end. */

/* Emit the digits of the single limb w in the base described by binv
   (a 1-limb division inverse), least significant digit first, at sp.
   Returns the number of digits written (0 for w == 0).  */
static size_t
mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
		  const struct gmp_div_inverse *binv)
{
  mp_size_t i;
  for (i = 0; w > 0; i++)
    {
      mp_limb_t h, l, r;

      /* Pre-shift w to match the normalized divisor, forming the
	 two-limb dividend <h, l>.  */
      h = w >> (GMP_LIMB_BITS - binv->shift);
      l = w << binv->shift;

      gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
      /* The remainder's low shift bits must be zero: undo the shift.  */
      assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0);
      r >>= binv->shift;

      sp[i] = r;
    }
  return i;
}
/* Convert {up,un} to digits in a non-power-of-two base, most
   significant digit first, at sp.  Destroys {up,un}.  Repeatedly
   divides by the largest in-limb power of the base (info->bb), then
   splits each limb-sized chunk into single digits.  Returns the digit
   count.  */
static size_t
mpn_get_str_other (unsigned char *sp,
		   int base, const struct mpn_base_info *info,
		   mp_ptr up, mp_size_t un)
{
  struct gmp_div_inverse binv;
  size_t sn;
  size_t i;

  mpn_div_qr_1_invert (&binv, base);

  sn = 0;

  if (un > 1)
    {
      struct gmp_div_inverse bbinv;
      mpn_div_qr_1_invert (&bbinv, info->bb);

      do
	{
	  mp_limb_t w;
	  size_t done;
	  /* Peel off the next info->exp digits as one limb-sized
	     remainder.  */
	  w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
	  un -= (up[un-1] == 0);
	  done = mpn_limb_get_str (sp + sn, w, &binv);

	  /* Pad with zero digits up to a full chunk; inner chunks must
	     have exactly info->exp digits.  */
	  for (sn += done; done < info->exp; done++)
	    sp[sn++] = 0;
	}
      while (un > 1);
    }
  /* The most significant chunk is not zero-padded.  */
  sn += mpn_limb_get_str (sp + sn, up[0], &binv);

  /* Reverse order */
  for (i = 0; 2*i + 1 < sn; i++)
    {
      unsigned char t = sp[i];
      sp[i] = sp[sn - i - 1];
      sp[sn - i - 1] = t;
    }

  return sn;
}
  1081. size_t
  1082. mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
  1083. {
  1084. unsigned bits;
  1085. assert (un > 0);
  1086. assert (up[un-1] > 0);
  1087. bits = mpn_base_power_of_two_p (base);
  1088. if (bits)
  1089. return mpn_get_str_bits (sp, bits, up, un);
  1090. else
  1091. {
  1092. struct mpn_base_info info;
  1093. mpn_get_base_info (&info, base);
  1094. return mpn_get_str_other (sp, base, &info, up, un);
  1095. }
  1096. }
/* Assemble limbs at rp from sn digit values (most significant first)
   in a power-of-two base 2^bits.  Returns the normalized limb count.  */
static mp_size_t
mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
		  unsigned bits)
{
  mp_size_t rn;
  size_t j;
  unsigned shift;

  /* Consume digits from the least significant end, packing them into
     limbs at increasing bit offsets.  */
  for (j = sn, rn = 0, shift = 0; j-- > 0; )
    {
      if (shift == 0)
	{
	  /* Start a fresh limb.  */
	  rp[rn++] = sp[j];
	  shift += bits;
	}
      else
	{
	  rp[rn-1] |= (mp_limb_t) sp[j] << shift;
	  shift += bits;
	  if (shift >= GMP_LIMB_BITS)
	    {
	      shift -= GMP_LIMB_BITS;
	      if (shift > 0)
		/* Digit straddles the limb boundary; its high bits
		   open the next limb.  */
		rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
	    }
	}
    }
  rn = mpn_normalized_size (rp, rn);
  return rn;
}
/* Result is usually normalized, except for all-zero input, in which
   case a single zero limb is written at *RP, and 1 is returned. */

/* Assemble limbs at rp from sn digit values (most significant first)
   in a non-power-of-two base b, using Horner's scheme one limb-chunk
   (info->exp digits == one multiply by info->bb) at a time.  */
static mp_size_t
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
		   mp_limb_t b, const struct mpn_base_info *info)
{
  mp_size_t rn;
  mp_limb_t w;
  unsigned k;
  size_t j;

  assert (sn > 0);

  /* The leading chunk may be short: 1..info->exp digits so the rest of
     the input splits into whole chunks.  */
  k = 1 + (sn - 1) % info->exp;

  j = 0;
  w = sp[j++];
  while (--k != 0)
    w = w * b + sp[j++];

  rp[0] = w;

  for (rn = 1; j < sn;)
    {
      mp_limb_t cy;

      /* Accumulate the next info->exp digits into a single limb...  */
      w = sp[j++];
      for (k = 1; k < info->exp; k++)
	w = w * b + sp[j++];

      /* ...then rp = rp * bb + w.  */
      cy = mpn_mul_1 (rp, rp, rn, info->bb);
      cy += mpn_add_1 (rp, rp, rn, w);
      if (cy > 0)
	rp[rn++] = cy;
    }
  assert (j == sn);

  return rn;
}
  1157. mp_size_t
  1158. mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
  1159. {
  1160. unsigned bits;
  1161. if (sn == 0)
  1162. return 0;
  1163. bits = mpn_base_power_of_two_p (base);
  1164. if (bits)
  1165. return mpn_set_str_bits (rp, sp, sn, bits);
  1166. else
  1167. {
  1168. struct mpn_base_info info;
  1169. mpn_get_base_info (&info, base);
  1170. return mpn_set_str_other (rp, sp, sn, base, &info);
  1171. }
  1172. }
  1173. /* MPZ interface */
/* Initialize r to the value 0 without allocating.  _mp_d is pointed at
   a static read-only dummy limb so it is never NULL; _mp_alloc == 0
   tells mpz_clear/mpz_realloc that nothing was allocated.  */
void
mpz_init (mpz_t r)
{
  /* The value 0xc1a0 is an arbitrary recognizable marker; the limb is
     never read as a value since _mp_size is 0.  */
  static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0;
  r->_mp_alloc = 0;
  r->_mp_size = 0;
  r->_mp_d = (mp_ptr) &dummy_limb;
}
/* The utility of this function is a bit limited, since many functions
   assigns the result variable using mpz_swap. */

/* Initialize r to 0 with room preallocated for at least `bits` bits.  */
void
mpz_init2 (mpz_t r, mp_bitcnt_t bits)
{
  mp_size_t rn;

  /* ceil(bits / GMP_LIMB_BITS), but at least 1 limb: subtracting 1
     from any nonzero count before the division rounds up.  */
  bits -= (bits != 0);	/* Round down, except if 0 */
  rn = 1 + bits / GMP_LIMB_BITS;

  r->_mp_alloc = rn;
  r->_mp_size = 0;
  r->_mp_d = gmp_xalloc_limbs (rn);
}
  1194. void
  1195. mpz_clear (mpz_t r)
  1196. {
  1197. if (r->_mp_alloc)
  1198. gmp_free (r->_mp_d);
  1199. }
/* Grow (or shrink) r's limb array to `size` limbs (at least 1) and
   return the new limb pointer.  If the stored value no longer fits it
   is cleared to zero.  */
static mp_ptr
mpz_realloc (mpz_t r, mp_size_t size)
{
  size = GMP_MAX (size, 1);

  /* _mp_alloc == 0 means _mp_d is the static dummy, which must not be
     passed to realloc.  */
  if (r->_mp_alloc)
    r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
  else
    r->_mp_d = gmp_xalloc_limbs (size);
  r->_mp_alloc = size;

  /* Shrinking below the current size truncates the value; reset it.  */
  if (GMP_ABS (r->_mp_size) > size)
    r->_mp_size = 0;

  return r->_mp_d;
}
/* Ensure the mpz_t Z has room for at least N limbs, reallocating if
   necessary; evaluates to the (possibly updated) limb pointer.  */
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc			\
			  ? mpz_realloc(z,n)			\
			  : (z)->_mp_d)
  1217. /* MPZ assignment and basic conversions. */
/* Set r to the signed value x.  */
void
mpz_set_si (mpz_t r, intptr_t x)
{
  if (x >= 0)
    mpz_set_ui (r, x);
  else /* (x < 0) */
    if (GMP_LIMB_BITS < GMP_ULONG_BITS)
      {
	/* |x| may need several limbs; set the magnitude, then flip the
	   sign.  GMP_NEG_CAST avoids signed-overflow UB on the most
	   negative value.  */
	mpz_set_ui (r, GMP_NEG_CAST (uintptr_t, x));
	mpz_neg (r, r);
      }
  else
    {
      /* |x| fits in one limb: store it directly with negative size.  */
      r->_mp_size = -1;
      MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (uintptr_t, x);
    }
}
/* Set r to the unsigned value x.  */
void
mpz_set_ui (mpz_t r, uintptr_t x)
{
  if (x > 0)
    {
      r->_mp_size = 1;
      MPZ_REALLOC (r, 1)[0] = x;
      if (GMP_LIMB_BITS < GMP_ULONG_BITS)
	{
	  /* x is wider than one limb: peel off further limbs.  The
	     shift count is hidden in a variable to silence bogus
	     "shift >= width" warnings on the dead branch.  */
	  int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
	  while (x >>= LOCAL_GMP_LIMB_BITS)
	    {
	      ++ r->_mp_size;
	      MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x;
	    }
	}
    }
  else
    r->_mp_size = 0;
}
  1255. void
  1256. mpz_set (mpz_t r, const mpz_t x)
  1257. {
  1258. /* Allow the NOP r == x */
  1259. if (r != x)
  1260. {
  1261. mp_size_t n;
  1262. mp_ptr rp;
  1263. n = GMP_ABS (x->_mp_size);
  1264. rp = MPZ_REALLOC (r, n);
  1265. mpn_copyi (rp, x->_mp_d, n);
  1266. r->_mp_size = x->_mp_size;
  1267. }
  1268. }
  1269. void
  1270. mpz_init_set_si (mpz_t r, intptr_t x)
  1271. {
  1272. mpz_init (r);
  1273. mpz_set_si (r, x);
  1274. }
  1275. void
  1276. mpz_init_set_ui (mpz_t r, uintptr_t x)
  1277. {
  1278. mpz_init (r);
  1279. mpz_set_ui (r, x);
  1280. }
  1281. void
  1282. mpz_init_set (mpz_t r, const mpz_t x)
  1283. {
  1284. mpz_init (r);
  1285. mpz_set (r, x);
  1286. }
/* Return nonzero if u fits in an intptr_t.  On two's complement
   systems (INTPTR_MAX + INTPTR_MIN == -1) the first test is skipped
   and only |u| <= |INTPTR_MIN| is checked, which also admits
   INTPTR_MIN itself.  */
int
mpz_fits_slong_p (const mpz_t u)
{
  return (INTPTR_MAX + INTPTR_MIN == 0 || mpz_cmp_ui (u, INTPTR_MAX) <= 0) &&
    mpz_cmpabs_ui (u, GMP_NEG_CAST (uintptr_t, INTPTR_MIN)) <= 0;
}
/* Return nonzero if the magnitude {up,un} fits in an unsigned long.
   Handles limb sizes both smaller than and equal to unsigned long,
   including the case where ULONG_BITS is not a multiple of the limb
   size (then one extra, partially-usable limb is allowed).  */
static int
mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un)
{
  int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS;
  mp_limb_t ulongrem = 0;

  if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0)
    /* Upper bound (exclusive) for the top limb's value.  */
    ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1;

  return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1);
}
  1302. int
  1303. mpz_fits_ulong_p (const mpz_t u)
  1304. {
  1305. mp_size_t us = u->_mp_size;
  1306. return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us);
  1307. }
/* Return the low bits of u as a signed intptr_t (truncating, like the
   public gmp interface for out-of-range values).  */
intptr_t
mpz_get_si (const mpz_t u)
{
  uintptr_t r = mpz_get_ui (u);
  /* c == 1 on two's complement, 0 on sign-magnitude/ones complement.  */
  uintptr_t c = -INTPTR_MAX - INTPTR_MIN;

  if (u->_mp_size < 0)
    /* This expression is necessary to properly handle -INTPTR_MIN */
    return -(intptr_t) c - (intptr_t) ((r - c) & INTPTR_MAX);
  else
    return (intptr_t) (r & INTPTR_MAX);
}
/* Return the low bits of |u| as a uintptr_t (truncating).  */
uintptr_t
mpz_get_ui (const mpz_t u)
{
  if (GMP_LIMB_BITS < GMP_ULONG_BITS)
    {
      /* Gather as many limbs as contribute to a uintptr_t, from most
	 significant down.  The local shift count avoids a bogus
	 "shift >= width" warning on the dead branch.  */
      int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
      uintptr_t r = 0;
      mp_size_t n = GMP_ABS (u->_mp_size);
      n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS);
      while (--n >= 0)
	r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n];
      return r;
    }

  return u->_mp_size == 0 ? 0 : u->_mp_d[0];
}
  1334. size_t
  1335. mpz_size (const mpz_t u)
  1336. {
  1337. return GMP_ABS (u->_mp_size);
  1338. }
  1339. mp_limb_t
  1340. mpz_getlimbn (const mpz_t u, mp_size_t n)
  1341. {
  1342. if (n >= 0 && n < GMP_ABS (u->_mp_size))
  1343. return u->_mp_d[n];
  1344. else
  1345. return 0;
  1346. }
  1347. void
  1348. mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
  1349. {
  1350. mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
  1351. }
  1352. mp_srcptr
  1353. mpz_limbs_read (mpz_srcptr x)
  1354. {
  1355. return x->_mp_d;
  1356. }
  1357. mp_ptr
  1358. mpz_limbs_modify (mpz_t x, mp_size_t n)
  1359. {
  1360. assert (n > 0);
  1361. return MPZ_REALLOC (x, n);
  1362. }
  1363. mp_ptr
  1364. mpz_limbs_write (mpz_t x, mp_size_t n)
  1365. {
  1366. return mpz_limbs_modify (x, n);
  1367. }
  1368. void
  1369. mpz_limbs_finish (mpz_t x, mp_size_t xs)
  1370. {
  1371. mp_size_t xn;
  1372. xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
  1373. x->_mp_size = xs < 0 ? -xn : xn;
  1374. }
  1375. static mpz_srcptr
  1376. mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
  1377. {
  1378. x->_mp_alloc = 0;
  1379. x->_mp_d = (mp_ptr) xp;
  1380. x->_mp_size = xs;
  1381. return x;
  1382. }
  1383. mpz_srcptr
  1384. mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
  1385. {
  1386. mpz_roinit_normal_n (x, xp, xs);
  1387. mpz_limbs_finish (x, xs);
  1388. return x;
  1389. }
/* Conversions and comparison to double. */

/* Set r to trunc(x).  NaN, infinities and values of magnitude below
   1 all map to 0.  */
void
mpz_set_d (mpz_t r, double x)
{
  int sign;
  mp_ptr rp;
  mp_size_t rn, i;
  double B;
  double Bi;
  mp_limb_t f;

  /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
     zero or infinity. */
  if (x != x || x == x * 0.5)
    {
      r->_mp_size = 0;
      return;
    }

  sign = x < 0.0 ;
  if (sign)
    x = - x;

  if (x < 1.0)
    {
      r->_mp_size = 0;
      return;
    }
  /* B is the limb base 2^GMP_LIMB_BITS as a double.  */
  B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
  Bi = 1.0 / B;
  /* Scale x down until it fits in one limb; rn counts the limbs.  */
  for (rn = 1; x >= B; rn++)
    x *= Bi;

  rp = MPZ_REALLOC (r, rn);

  /* Extract limbs from the most significant end, re-scaling the
     fractional remainder up by B each step.  */
  f = (mp_limb_t) x;
  x -= f;
  assert (x < 1.0);
  i = rn-1;
  rp[i] = f;
  while (--i >= 0)
    {
      x = B * x;
      f = (mp_limb_t) x;
      x -= f;
      assert (x < 1.0);
      rp[i] = f;
    }

  r->_mp_size = sign ? - rn : rn;
}
  1435. void
  1436. mpz_init_set_d (mpz_t r, double x)
  1437. {
  1438. mpz_init (r);
  1439. mpz_set_d (r, x);
  1440. }
/* Return u converted to a double, truncating (the limbs below the
   mantissa are masked off rather than rounded).  */
double
mpz_get_d (const mpz_t u)
{
  int m;
  mp_limb_t l;
  mp_size_t un;
  double x;
  /* The limb base 2^GMP_LIMB_BITS as a double.  */
  double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);

  un = GMP_ABS (u->_mp_size);

  if (un == 0)
    return 0.0;

  l = u->_mp_d[--un];
  gmp_clz (m, l);
  /* m = number of mantissa bits still available after the top limb's
     significant bits are consumed.  */
  m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS;
  if (m < 0)
    l &= GMP_LIMB_MAX << -m;	/* truncate below the mantissa */

  for (x = l; --un >= 0;)
    {
      x = B*x;
      /* Only add lower limbs while mantissa bits remain; x must still
	 be scaled by B for each skipped limb.  */
      if (m > 0) {
	l = u->_mp_d[un];
	m -= GMP_LIMB_BITS;
	if (m < 0)
	  l &= GMP_LIMB_MAX << -m;
	x += l;
      }
    }

  if (u->_mp_size < 0)
    x = -x;

  return x;
}
/* Compare |x| with |d|; returns <0, 0 or >0.  Works limb by limb on
   the scaled double, so it is exact even when d has more bits than a
   double's mantissa would round.  */
int
mpz_cmpabs_d (const mpz_t x, double d)
{
  mp_size_t xn;
  double B, Bi;
  mp_size_t i;

  xn = x->_mp_size;
  d = GMP_ABS (d);

  if (xn != 0)
    {
      xn = GMP_ABS (xn);

      B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
      Bi = 1.0 / B;

      /* Scale d so it can be compared with the top limb. */
      for (i = 1; i < xn; i++)
	d *= Bi;

      /* d needs more limbs than x has: |d| > |x|.  */
      if (d >= B)
	return -1;

      /* Compare floor(d) to top limb, subtract and cancel when equal. */
      for (i = xn; i-- > 0;)
	{
	  mp_limb_t f, xl;

	  f = (mp_limb_t) d;
	  xl = x->_mp_d[i];
	  if (xl > f)
	    return 1;
	  else if (xl < f)
	    return -1;
	  /* Equal so far; bring the next limb's worth of d into range.  */
	  d = B * (d - f);
	}
    }
  /* x == 0: only a nonzero fractional d makes |d| larger.  */
  return - (d > 0.0);
}
  1505. int
  1506. mpz_cmp_d (const mpz_t x, double d)
  1507. {
  1508. if (x->_mp_size < 0)
  1509. {
  1510. if (d >= 0.0)
  1511. return -1;
  1512. else
  1513. return -mpz_cmpabs_d (x, d);
  1514. }
  1515. else
  1516. {
  1517. if (d < 0.0)
  1518. return 1;
  1519. else
  1520. return mpz_cmpabs_d (x, d);
  1521. }
  1522. }
  1523. /* MPZ comparisons and the like. */
  1524. int
  1525. mpz_sgn (const mpz_t u)
  1526. {
  1527. return GMP_CMP (u->_mp_size, 0);
  1528. }
  1529. int
  1530. mpz_cmp_si (const mpz_t u, intptr_t v)
  1531. {
  1532. mp_size_t usize = u->_mp_size;
  1533. if (v >= 0)
  1534. return mpz_cmp_ui (u, v);
  1535. else if (usize >= 0)
  1536. return 1;
  1537. else
  1538. return - mpz_cmpabs_ui (u, GMP_NEG_CAST (uintptr_t, v));
  1539. }
  1540. int
  1541. mpz_cmp_ui (const mpz_t u, uintptr_t v)
  1542. {
  1543. mp_size_t usize = u->_mp_size;
  1544. if (usize < 0)
  1545. return -1;
  1546. else
  1547. return mpz_cmpabs_ui (u, v);
  1548. }
  1549. int
  1550. mpz_cmp (const mpz_t a, const mpz_t b)
  1551. {
  1552. mp_size_t asize = a->_mp_size;
  1553. mp_size_t bsize = b->_mp_size;
  1554. if (asize != bsize)
  1555. return (asize < bsize) ? -1 : 1;
  1556. else if (asize >= 0)
  1557. return mpn_cmp (a->_mp_d, b->_mp_d, asize);
  1558. else
  1559. return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
  1560. }
  1561. int
  1562. mpz_cmpabs_ui (const mpz_t u, uintptr_t v)
  1563. {
  1564. mp_size_t un = GMP_ABS (u->_mp_size);
  1565. if (! mpn_absfits_ulong_p (u->_mp_d, un))
  1566. return 1;
  1567. else
  1568. {
  1569. uintptr_t uu = mpz_get_ui (u);
  1570. return GMP_CMP(uu, v);
  1571. }
  1572. }
  1573. int
  1574. mpz_cmpabs (const mpz_t u, const mpz_t v)
  1575. {
  1576. return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
  1577. v->_mp_d, GMP_ABS (v->_mp_size));
  1578. }
  1579. void
  1580. mpz_abs (mpz_t r, const mpz_t u)
  1581. {
  1582. mpz_set (r, u);
  1583. r->_mp_size = GMP_ABS (r->_mp_size);
  1584. }
  1585. void
  1586. mpz_neg (mpz_t r, const mpz_t u)
  1587. {
  1588. mpz_set (r, u);
  1589. r->_mp_size = -r->_mp_size;
  1590. }
  1591. void
  1592. mpz_swap (mpz_t u, mpz_t v)
  1593. {
  1594. MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
  1595. MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
  1596. MP_PTR_SWAP (u->_mp_d, v->_mp_d);
  1597. }
  1598. /* MPZ addition and subtraction */
  1599. void
  1600. mpz_add_ui (mpz_t r, const mpz_t a, uintptr_t b)
  1601. {
  1602. mpz_t bb;
  1603. mpz_init_set_ui (bb, b);
  1604. mpz_add (r, a, bb);
  1605. mpz_clear (bb);
  1606. }
  1607. void
  1608. mpz_sub_ui (mpz_t r, const mpz_t a, uintptr_t b)
  1609. {
  1610. mpz_ui_sub (r, b, a);
  1611. mpz_neg (r, r);
  1612. }
  1613. void
  1614. mpz_ui_sub (mpz_t r, uintptr_t a, const mpz_t b)
  1615. {
  1616. mpz_neg (r, b);
  1617. mpz_add_ui (r, r, a);
  1618. }
/* r = |a| + |b|.  Returns the limb count of the result (always
   non-negative; the caller applies the sign).  */
static mp_size_t
mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  mp_ptr rp;
  mp_limb_t cy;

  /* mpn_add requires the first operand to be the longer one.  */
  if (an < bn)
    {
      MPZ_SRCPTR_SWAP (a, b);
      MP_SIZE_T_SWAP (an, bn);
    }

  rp = MPZ_REALLOC (r, an + 1);
  cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);

  /* Storing the carry limb (possibly 0) keeps rp normalized when
     an + cy is used as the size.  */
  rp[an] = cy;

  return an + cy;
}
/* r = |a| - |b|.  Returns the size of the result with sign: positive
   when |a| > |b|, negative when |a| < |b|, 0 when equal.  */
static mp_size_t
mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  int cmp;
  mp_ptr rp;

  /* Subtract the smaller magnitude from the larger so no borrow can
     escape (asserted via gmp_assert_nocarry).  */
  cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
  if (cmp > 0)
    {
      rp = MPZ_REALLOC (r, an);
      gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
      return mpn_normalized_size (rp, an);
    }
  else if (cmp < 0)
    {
      rp = MPZ_REALLOC (r, bn);
      gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
      return -mpn_normalized_size (rp, bn);
    }
  else
    return 0;
}
/* r = a + b. */
void
mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t rn;

  /* The xor of the size fields is non-negative iff a and b have the
     same sign (or one is zero): then magnitudes add; otherwise they
     subtract. */
  if ( (a->_mp_size ^ b->_mp_size) >= 0)
    rn = mpz_abs_add (r, a, b);
  else
    rn = mpz_abs_sub (r, a, b);

  /* mpz_abs_sub returns a size signed relative to |a| - |b|, so
     flipping by a's sign yields the correct result sign in all cases. */
  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
}
/* r = a - b.  Mirror image of mpz_add: magnitudes subtract when the
   signs agree, add when they differ. */
void
mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t rn;

  if ( (a->_mp_size ^ b->_mp_size) >= 0)
    rn = mpz_abs_sub (r, a, b);
  else
    rn = mpz_abs_add (r, a, b);

  /* As in mpz_add, the signed size from mpz_abs_sub composes with a's
     sign to give the final sign. */
  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
}
  1679. /* MPZ multiplication */
  1680. void
  1681. mpz_mul_si (mpz_t r, const mpz_t u, intptr_t v)
  1682. {
  1683. if (v < 0)
  1684. {
  1685. mpz_mul_ui (r, u, GMP_NEG_CAST (uintptr_t, v));
  1686. mpz_neg (r, r);
  1687. }
  1688. else
  1689. mpz_mul_ui (r, u, v);
  1690. }
  1691. void
  1692. mpz_mul_ui (mpz_t r, const mpz_t u, uintptr_t v)
  1693. {
  1694. mpz_t vv;
  1695. mpz_init_set_ui (vv, v);
  1696. mpz_mul (r, u, vv);
  1697. mpz_clear (vv);
  1698. return;
  1699. }
/* r = u * v.  A fresh temporary is used so that r may alias u or v. */
void
mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
{
  int sign;
  mp_size_t un, vn, rn;
  mpz_t t;
  mp_ptr tp;

  un = u->_mp_size;
  vn = v->_mp_size;

  if (un == 0 || vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Result is negative iff exactly one operand is. */
  sign = (un ^ vn) < 0;

  un = GMP_ABS (un);
  vn = GMP_ABS (vn);

  mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);

  tp = t->_mp_d;
  /* mpn_mul requires its first operand to be the longer one. */
  if (un >= vn)
    mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
  else
    mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);

  /* The product has un + vn limbs, or one fewer when the top limb
     came out zero. */
  rn = un + vn;
  rn -= tp[rn-1] == 0;

  t->_mp_size = sign ? - rn : rn;
  mpz_swap (r, t);
  mpz_clear (t);
}
/* r = u * 2^bits: shift left by whole limbs plus a sub-limb bit
   shift. */
void
mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
{
  mp_size_t un, rn;
  mp_size_t limbs;
  unsigned shift;
  mp_ptr rp;

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    {
      r->_mp_size = 0;
      return;
    }

  limbs = bits / GMP_LIMB_BITS;
  shift = bits % GMP_LIMB_BITS;

  /* One extra limb is reserved for the bits shifted out of the top. */
  rn = un + limbs + (shift > 0);
  rp = MPZ_REALLOC (r, rn);
  if (shift > 0)
    {
      mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
      rp[rn-1] = cy;
      /* Drop the extra limb again if nothing spilled into it. */
      rn -= (cy == 0);
    }
  else
    /* Pure limb shift; copy high-to-low since the ranges may overlap
       when r aliases u. */
    mpn_copyd (rp + limbs, u->_mp_d, un);

  mpn_zero (rp, limbs);

  r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
}
  1757. void
  1758. mpz_addmul_ui (mpz_t r, const mpz_t u, uintptr_t v)
  1759. {
  1760. mpz_t t;
  1761. mpz_init_set_ui (t, v);
  1762. mpz_mul (t, u, t);
  1763. mpz_add (r, r, t);
  1764. mpz_clear (t);
  1765. }
  1766. void
  1767. mpz_submul_ui (mpz_t r, const mpz_t u, uintptr_t v)
  1768. {
  1769. mpz_t t;
  1770. mpz_init_set_ui (t, v);
  1771. mpz_mul (t, u, t);
  1772. mpz_sub (r, r, t);
  1773. mpz_clear (t);
  1774. }
  1775. void
  1776. mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
  1777. {
  1778. mpz_t t;
  1779. mpz_init (t);
  1780. mpz_mul (t, u, v);
  1781. mpz_add (r, r, t);
  1782. mpz_clear (t);
  1783. }
  1784. void
  1785. mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
  1786. {
  1787. mpz_t t;
  1788. mpz_init (t);
  1789. mpz_mul (t, u, v);
  1790. mpz_sub (r, r, t);
  1791. mpz_clear (t);
  1792. }
/* MPZ division */
/* Rounding modes for the division helpers: FLOOR rounds the quotient
   towards -infinity, CEIL towards +infinity, TRUNC towards zero. */
enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
/* Allows q or r to be zero. Returns 1 iff remainder is non-zero.
   Computes q = n / d and r = n - q * d, with the quotient rounded
   according to mode. */
static int
mpz_div_qr (mpz_t q, mpz_t r,
            const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, ds, nn, dn, qs;
  ns = n->_mp_size;
  ds = d->_mp_size;

  if (ds == 0)
    gmp_die("mpz_div_qr: Divide by zero.");

  if (ns == 0)
    {
      /* 0 / d: quotient and remainder are both zero. */
      if (q)
        q->_mp_size = 0;
      if (r)
        r->_mp_size = 0;
      return 0;
    }

  nn = GMP_ABS (ns);
  dn = GMP_ABS (ds);

  /* qs < 0 iff the operands have opposite signs, i.e. the true
     quotient is negative. */
  qs = ds ^ ns;

  if (nn < dn)
    {
      /* |n| < |d|: the truncated quotient is zero; only a possible
         rounding adjustment remains. */
      if (mode == GMP_DIV_CEIL && qs >= 0)
        {
          /* q = 1, r = n - d */
          if (r)
            mpz_sub (r, n, d);
          if (q)
            mpz_set_ui (q, 1);
        }
      else if (mode == GMP_DIV_FLOOR && qs < 0)
        {
          /* q = -1, r = n + d */
          if (r)
            mpz_add (r, n, d);
          if (q)
            mpz_set_si (q, -1);
        }
      else
        {
          /* q = 0, r = n */
          if (r)
            mpz_set (r, n);
          if (q)
            q->_mp_size = 0;
        }
      return 1;
    }
  else
    {
      mp_ptr np, qp;
      mp_size_t qn, rn;
      mpz_t tq, tr;

      /* Work on a copy of n; mpn_div_qr overwrites it with the
         remainder.  Temporaries also make n/d aliasing with q/r safe. */
      mpz_init_set (tr, n);
      np = tr->_mp_d;

      qn = nn - dn + 1;

      if (q)
        {
          mpz_init2 (tq, qn * GMP_LIMB_BITS);
          qp = tq->_mp_d;
        }
      else
        qp = NULL;

      mpn_div_qr (qp, np, nn, d->_mp_d, dn);

      if (qp)
        {
          /* The quotient has qn or qn - 1 limbs. */
          qn -= (qp[qn-1] == 0);

          tq->_mp_size = qs < 0 ? -qn : qn;
        }
      rn = mpn_normalized_size (np, dn);
      /* Truncating division leaves the remainder with n's sign. */
      tr->_mp_size = ns < 0 ? - rn : rn;

      /* Adjust the truncated result for floor/ceil rounding when the
         remainder is non-zero. */
      if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
        {
          if (q)
            mpz_sub_ui (tq, tq, 1);
          if (r)
            mpz_add (tr, tr, d);
        }
      else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
        {
          if (q)
            mpz_add_ui (tq, tq, 1);
          if (r)
            mpz_sub (tr, tr, d);
        }
      if (q)
        {
          mpz_swap (tq, q);
          mpz_clear (tq);
        }
      if (r)
        mpz_swap (tr, r);

      mpz_clear (tr);

      return rn != 0;
    }
}
/* Public division entry points.  The c/f/t prefix selects ceiling,
   floor or truncating rounding; the q/r/qr suffix selects which of
   quotient and remainder are produced. */
void
mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
}

void
mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
}

void
mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
}

void
mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
}

void
mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
}

void
mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
}

void
mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
}

void
mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
}

void
mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
}
/* r = n mod d.  The rounding mode is chosen from the sign of d (floor
   for d > 0, ceil for d < 0) so that the resulting remainder is never
   negative. */
void
mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
}
/* q = u / 2^bit_index, with the quotient rounded according to mode. */
static void
mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
                enum mpz_div_round_mode mode)
{
  mp_size_t un, qn;
  mp_size_t limb_cnt;
  mp_ptr qp;
  int adjust;

  un = u->_mp_size;
  if (un == 0)
    {
      q->_mp_size = 0;
      return;
    }
  /* Split the shift into whole limbs plus a residual bit count. */
  limb_cnt = bit_index / GMP_LIMB_BITS;
  qn = GMP_ABS (un) - limb_cnt;
  bit_index %= GMP_LIMB_BITS;

  /* Rounding away from zero on |u| happens for ceil with positive u
     and floor with negative u, and only when some shifted-out bit is
     non-zero. */
  if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
    /* Note: Below, the final indexing at limb_cnt is valid because at
       that point we have qn > 0. */
    adjust = (qn <= 0
              || !mpn_zero_p (u->_mp_d, limb_cnt)
              || (u->_mp_d[limb_cnt]
                  & (((mp_limb_t) 1 << bit_index) - 1)));
  else
    adjust = 0;

  if (qn <= 0)
    qn = 0;
  else
    {
      qp = MPZ_REALLOC (q, qn);

      if (bit_index != 0)
        {
          mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
          /* The shift may clear the top limb. */
          qn -= qp[qn - 1] == 0;
        }
      else
        {
          mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
        }
    }

  q->_mp_size = qn;

  /* Apply the rounding increment to |q|, then restore u's sign. */
  if (adjust)
    mpz_add_ui (q, q, 1);
  if (un < 0)
    mpz_neg (q, q);
}
/* r = u mod 2^bit_index, i.e. the remainder of division by 2^bit_index,
   with the sign convention implied by mode. */
static void
mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
                enum mpz_div_round_mode mode)
{
  mp_size_t us, un, rn;
  mp_ptr rp;
  mp_limb_t mask;

  us = u->_mp_size;
  if (us == 0 || bit_index == 0)
    {
      r->_mp_size = 0;
      return;
    }
  /* Number of limbs needed to hold bit_index bits. */
  rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
  assert (rn > 0);
  rp = MPZ_REALLOC (r, rn);

  un = GMP_ABS (us);

  /* Mask of the valid bits in the top limb of the remainder. */
  mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);

  if (rn > un)
    {
      /* Quotient (with truncation) is zero, and remainder is
         non-zero. */
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
        {
          /* Have to negate and sign extend. */
          mp_size_t i;

          gmp_assert_nocarry (! mpn_neg (rp, u->_mp_d, un));
          for (i = un; i < rn - 1; i++)
            rp[i] = GMP_LIMB_MAX;

          rp[rn-1] = mask;
          us = -us;
        }
      else
        {
          /* Just copy */
          if (r != u)
            mpn_copyi (rp, u->_mp_d, un);

          rn = un;
        }
    }
  else
    {
      if (r != u)
        mpn_copyi (rp, u->_mp_d, rn - 1);

      rp[rn-1] = u->_mp_d[rn-1] & mask;

      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
        {
          /* If r != 0, compute 2^{bit_count} - r. */
          mpn_neg (rp, rp, rn);

          rp[rn-1] &= mask;

          /* us is not used for anything else, so we can modify it
             here to indicate flipped sign. */
          us = -us;
        }
    }
  rn = mpn_normalized_size (rp, rn);
  r->_mp_size = us < 0 ? -rn : rn;
}
/* Power-of-two division entry points; c/f/t selects ceiling, floor or
   truncating rounding, q/r selects quotient or remainder. */
void
mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
}

void
mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

void
mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
}

void
mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
}

void
mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

void
mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
}
/* q = n / d, valid only when d divides n exactly; a non-zero
   remainder trips gmp_assert_nocarry. */
void
mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
{
  gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
}
  2082. int
  2083. mpz_divisible_p (const mpz_t n, const mpz_t d)
  2084. {
  2085. return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
  2086. }
  2087. int
  2088. mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
  2089. {
  2090. mpz_t t;
  2091. int res;
  2092. /* a == b (mod 0) iff a == b */
  2093. if (mpz_sgn (m) == 0)
  2094. return (mpz_cmp (a, b) == 0);
  2095. mpz_init (t);
  2096. mpz_sub (t, a, b);
  2097. res = mpz_divisible_p (t, m);
  2098. mpz_clear (t);
  2099. return res;
  2100. }
/* Division by a single-word unsigned d.  Stores quotient in q and
   remainder in r when non-NULL, and returns the absolute value of the
   remainder as an unsigned word. */
static uintptr_t
mpz_div_qr_ui (mpz_t q, mpz_t r,
               const mpz_t n, uintptr_t d, enum mpz_div_round_mode mode)
{
  uintptr_t ret;
  mpz_t rr, dd;

  mpz_init (rr);
  mpz_init_set_ui (dd, d);
  mpz_div_qr (q, rr, n, dd, mode);
  mpz_clear (dd);
  /* Extract the magnitude of the remainder before (possibly) handing
     rr over to the caller. */
  ret = mpz_get_ui (rr);

  if (r)
    mpz_swap (r, rr);
  mpz_clear (rr);

  return ret;
}
/* Single-word divisor entry points.  Naming as for the mpz/mpz
   variants: c/f/t selects ceiling, floor or truncating rounding; the
   q/r/qr suffix selects which results are stored.  All return the
   absolute value of the remainder, as computed by mpz_div_qr_ui. */
uintptr_t
mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
}

uintptr_t
mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
}

uintptr_t
mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
}

uintptr_t
mpz_cdiv_q_ui (mpz_t q, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
}

uintptr_t
mpz_fdiv_q_ui (mpz_t q, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
}

uintptr_t
mpz_tdiv_q_ui (mpz_t q, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
}

uintptr_t
mpz_cdiv_r_ui (mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
}

uintptr_t
mpz_fdiv_r_ui (mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}

uintptr_t
mpz_tdiv_r_ui (mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
}

/* Remainder-only variants: neither quotient nor remainder is stored,
   only the word-sized |remainder| is returned. */
uintptr_t
mpz_cdiv_ui (const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
}

uintptr_t
mpz_fdiv_ui (const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
}

uintptr_t
mpz_tdiv_ui (const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
}

/* r = n mod d with floor rounding, also returned as a word. */
uintptr_t
mpz_mod_ui (mpz_t r, const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}

/* q = n / d; requires d to divide n exactly. */
void
mpz_divexact_ui (mpz_t q, const mpz_t n, uintptr_t d)
{
  gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
}

/* Return non-zero iff d divides n. */
int
mpz_divisible_ui_p (const mpz_t n, uintptr_t d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}
  2192. /* GCD */
/* Single-limb binary gcd.  Requires u and v not both zero. */
static mp_limb_t
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
{
  unsigned shift;

  assert ( (u | v) > 0);

  if (u == 0)
    return v;
  else if (v == 0)
    return u;

  /* Factor out the common power of two; it is restored at the end. */
  gmp_ctz (shift, u | v);

  u >>= shift;
  v >>= shift;

  /* Make u odd; at least one of u, v is odd after the shift. */
  if ( (u & 1) == 0)
    MP_LIMB_T_SWAP (u, v);

  while ( (v & 1) == 0)
    v >>= 1;

  /* Invariant: both u and v odd.  Subtract the smaller from the
     larger and strip factors of two until they meet. */
  while (u != v)
    {
      if (u > v)
        {
          u -= v;
          do
            u >>= 1;
          while ( (u & 1) == 0);
        }
      else
        {
          v -= u;
          do
            v >>= 1;
          while ( (v & 1) == 0);
        }
    }
  return u << shift;
}
/* gcd of u and a single word v.  g may be NULL.  Returns the gcd as a
   word when v != 0; when v == 0 the gcd is |u| (which may not fit a
   word), so 0 is returned and only g receives the result. */
uintptr_t
mpz_gcd_ui (mpz_t g, const mpz_t u, uintptr_t v)
{
  mpz_t t;

  mpz_init_set_ui(t, v);
  mpz_gcd (t, u, t);
  if (v > 0)
    v = mpz_get_ui (t);

  if (g)
    mpz_swap (t, g);

  mpz_clear (t);

  return v;
}
/* Strip all trailing zero bits from r (which must be positive), and
   return how many bits were removed, i.e. the largest k with 2^k
   dividing r. */
static mp_bitcnt_t
mpz_make_odd (mpz_t r)
{
  mp_bitcnt_t shift;

  assert (r->_mp_size > 0);
  /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
  shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
  mpz_tdiv_q_2exp (r, r, shift);

  return shift;
}
/* g = gcd (u, v), always non-negative.  Uses a binary-gcd style loop:
   common factors of two are pulled out first, then the odd parts are
   reduced by subtraction, falling back to a single-limb gcd at the
   end. */
void
mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv;
  mp_bitcnt_t uz, vz, gz;

  /* gcd (0, x) = |x|. */
  if (u->_mp_size == 0)
    {
      mpz_abs (g, v);
      return;
    }
  if (v->_mp_size == 0)
    {
      mpz_abs (g, u);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);

  /* Work on odd |u|, |v|; gz is the shared power of two of the gcd. */
  mpz_abs (tu, u);
  uz = mpz_make_odd (tu);
  mpz_abs (tv, v);
  vz = mpz_make_odd (tv);
  gz = GMP_MIN (uz, vz);

  if (tu->_mp_size < tv->_mp_size)
    mpz_swap (tu, tv);

  /* One full division to bring the operands close in size, then a
     subtraction-based loop. */
  mpz_tdiv_r (tu, tu, tv);
  if (tu->_mp_size == 0)
    {
      mpz_swap (g, tv);
    }
  else
    for (;;)
      {
        int c;

        mpz_make_odd (tu);
        c = mpz_cmp (tu, tv);
        if (c == 0)
          {
            mpz_swap (g, tu);
            break;
          }
        if (c < 0)
          mpz_swap (tu, tv);

        /* When the smaller operand fits one limb, finish with the
           fast single-limb gcd. */
        if (tv->_mp_size == 1)
          {
            mp_limb_t vl = tv->_mp_d[0];
            mp_limb_t ul = mpz_tdiv_ui (tu, vl);
            mpz_set_ui (g, mpn_gcd_11 (ul, vl));
            break;
          }
        mpz_sub (tu, tu, tv);
      }
  mpz_clear (tu);
  mpz_clear (tv);
  /* Restore the common power of two. */
  mpz_mul_2exp (g, g, gz);
}
/* Extended gcd: g = gcd (u, v) (non-negative) and, when s/t are
   non-NULL, cofactors with g = s u + t v.  Binary-gcd variant: the
   cofactor matrix accumulates a power-of-two determinant which is
   divided out at the end. */
void
mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv, s0, s1, t0, t1;
  mp_bitcnt_t uz, vz, gz;
  mp_bitcnt_t power;

  if (u->_mp_size == 0)
    {
      /* g = 0 u + sgn(v) v */
      intptr_t sign = mpz_sgn (v);
      mpz_abs (g, v);
      if (s)
        s->_mp_size = 0;
      if (t)
        mpz_set_si (t, sign);
      return;
    }

  if (v->_mp_size == 0)
    {
      /* g = sgn(u) u + 0 v */
      intptr_t sign = mpz_sgn (u);
      mpz_abs (g, u);
      if (s)
        mpz_set_si (s, sign);
      if (t)
        t->_mp_size = 0;
      return;
    }

  mpz_init (tu);
  mpz_init (tv);
  mpz_init (s0);
  mpz_init (s1);
  mpz_init (t0);
  mpz_init (t1);

  /* Strip powers of two; gz is the power common to the gcd. */
  mpz_abs (tu, u);
  uz = mpz_make_odd (tu);
  mpz_abs (tv, v);
  vz = mpz_make_odd (tv);
  gz = GMP_MIN (uz, vz);

  uz -= gz;
  vz -= gz;

  /* Cofactors corresponding to odd gcd. gz handled later. */
  if (tu->_mp_size < tv->_mp_size)
    {
      /* Swap the roles of u and v; s and t swap along with them. */
      mpz_swap (tu, tv);
      MPZ_SRCPTR_SWAP (u, v);
      MPZ_PTR_SWAP (s, t);
      MP_BITCNT_T_SWAP (uz, vz);
    }

  /* Maintain
   *
   * u = t0 tu + t1 tv
   * v = s0 tu + s1 tv
   *
   * where u and v denote the inputs with common factors of two
   * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
   *
   * 2^p tu = s1 u - t1 v
   * 2^p tv = -s0 u + t0 v
   */

  /* After initial division, tu = q tv + tu', we have
   *
   * u = 2^uz (tu' + q tv)
   * v = 2^vz tv
   *
   * or
   *
   * t0 = 2^uz, t1 = 2^uz q
   * s0 = 0, s1 = 2^vz
   */

  mpz_setbit (t0, uz);
  mpz_tdiv_qr (t1, tu, tu, tv);
  mpz_mul_2exp (t1, t1, uz);

  mpz_setbit (s1, vz);
  power = uz + vz;

  if (tu->_mp_size > 0)
    {
      mp_bitcnt_t shift;
      shift = mpz_make_odd (tu);
      mpz_mul_2exp (t0, t0, shift);
      mpz_mul_2exp (s0, s0, shift);
      power += shift;

      /* Subtractive loop on odd tu, tv, updating the cofactor matrix
         in lock-step so the invariants above are preserved. */
      for (;;)
        {
          int c;
          c = mpz_cmp (tu, tv);
          if (c == 0)
            break;

          if (c < 0)
            {
              /* tv = tv' + tu
               *
               * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
               * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */

              mpz_sub (tv, tv, tu);
              mpz_add (t0, t0, t1);
              mpz_add (s0, s0, s1);

              shift = mpz_make_odd (tv);
              mpz_mul_2exp (t1, t1, shift);
              mpz_mul_2exp (s1, s1, shift);
            }
          else
            {
              mpz_sub (tu, tu, tv);
              mpz_add (t1, t0, t1);
              mpz_add (s1, s0, s1);

              shift = mpz_make_odd (tu);
              mpz_mul_2exp (t0, t0, shift);
              mpz_mul_2exp (s0, s0, shift);
            }
          power += shift;
        }
    }

  /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
     cofactors. */

  mpz_mul_2exp (tv, tv, gz);
  mpz_neg (s0, s0);

  /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
     adjust cofactors, we need u / g and v / g */

  mpz_divexact (s1, v, tv);
  mpz_abs (s1, s1);
  mpz_divexact (t1, u, tv);
  mpz_abs (t1, t1);

  while (power-- > 0)
    {
      /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
      if (mpz_odd_p (s0) || mpz_odd_p (t0))
        {
          mpz_sub (s0, s0, s1);
          mpz_add (t0, t0, t1);
        }
      assert (mpz_even_p (t0) && mpz_even_p (s0));
      mpz_tdiv_q_2exp (s0, s0, 1);
      mpz_tdiv_q_2exp (t0, t0, 1);
    }

  /* Arrange so that |s| < |u| / 2g */
  mpz_add (s1, s0, s1);
  if (mpz_cmpabs (s0, s1) > 0)
    {
      mpz_swap (s0, s1);
      mpz_sub (t0, t0, t1);
    }
  /* Account for the original operand signs. */
  if (u->_mp_size < 0)
    mpz_neg (s0, s0);
  if (v->_mp_size < 0)
    mpz_neg (t0, t0);

  mpz_swap (g, tv);
  if (s)
    mpz_swap (s, s0);
  if (t)
    mpz_swap (t, t0);

  mpz_clear (tu);
  mpz_clear (tv);
  mpz_clear (s0);
  mpz_clear (s1);
  mpz_clear (t0);
  mpz_clear (t1);
}
/* r = lcm (u, v), always non-negative; lcm with zero is zero.
   Computed as (u / gcd) * v, with the division done first to keep the
   intermediate small. */
void
mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
{
  mpz_t g;

  if (u->_mp_size == 0 || v->_mp_size == 0)
    {
      r->_mp_size = 0;
      return;
    }

  mpz_init (g);

  mpz_gcd (g, u, v);
  mpz_divexact (g, u, g);
  mpz_mul (r, g, v);

  mpz_clear (g);
  mpz_abs (r, r);
}
/* r = lcm (u, v) for a single-word v, non-negative; zero when either
   operand is zero. */
void
mpz_lcm_ui (mpz_t r, const mpz_t u, uintptr_t v)
{
  if (v == 0 || u->_mp_size == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* v != 0 here, so mpz_gcd_ui returns the word-sized gcd. */
  v /= mpz_gcd_ui (NULL, u, v);
  mpz_mul_ui (r, u, v);

  mpz_abs (r, r);
}
/* Set r to the inverse of u modulo m, if it exists.  Returns 1 on
   success (r set, with the same sign convention as m), 0 if u is not
   invertible (r untouched). */
int
mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
{
  mpz_t g, tr;
  int invertible;

  /* No inverse for u == 0 or |m| <= 1. */
  if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
    return 0;

  mpz_init (g);
  mpz_init (tr);

  mpz_gcdext (g, tr, NULL, u, m);
  /* An inverse exists iff gcd (u, m) == 1. */
  invertible = (mpz_cmp_ui (g, 1) == 0);

  if (invertible)
    {
      /* Shift the cofactor into the canonical residue range by adding
         one multiple of m of the appropriate sign. */
      if (tr->_mp_size < 0)
        {
          if (m->_mp_size >= 0)
            mpz_add (tr, tr, m);
          else
            mpz_sub (tr, tr, m);
        }
      mpz_swap (r, tr);
    }

  mpz_clear (g);
  mpz_clear (tr);
  return invertible;
}
  2518. /* Higher level operations (sqrt, pow and root) */
/* r = b^e, by MSB-first binary exponentiation (square, conditionally
   multiply, for each bit of e).  b^0 == 1, including 0^0. */
void
mpz_pow_ui (mpz_t r, const mpz_t b, uintptr_t e)
{
  uintptr_t bit;
  mpz_t tr;
  mpz_init_set_ui (tr, 1);

  /* Scan e from its most significant bit down. */
  bit = GMP_ULONG_HIGHBIT;
  do
    {
      mpz_mul (tr, tr, tr);
      if (e & bit)
        mpz_mul (tr, tr, b);
      bit >>= 1;
    }
  while (bit > 0);

  /* Temporary allows r to alias b. */
  mpz_swap (r, tr);
  mpz_clear (tr);
}
  2537. void
  2538. mpz_ui_pow_ui (mpz_t r, uintptr_t blimb, uintptr_t e)
  2539. {
  2540. mpz_t b;
  2541. mpz_init_set_ui (b, blimb);
  2542. mpz_pow_ui (r, b, e);
  2543. mpz_clear (b);
  2544. }
/* r = b^e mod m.  Binary exponentiation with all intermediate
   reductions done against a left-normalized copy of m (so the mpn
   division helper never has to shift); only the final reduction uses
   the true m.  A negative exponent requires b to be invertible mod m. */
void
mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
{
  mpz_t tr;
  mpz_t base;
  mp_size_t en, mn;
  mp_srcptr mp;
  struct gmp_div_inverse minv;
  unsigned shift;
  mp_ptr tp = NULL;

  en = GMP_ABS (e->_mp_size);
  mn = GMP_ABS (m->_mp_size);
  if (mn == 0)
    gmp_die ("mpz_powm: Zero modulo.");

  /* b^0 = 1 for any b. */
  if (en == 0)
    {
      mpz_set_ui (r, 1);
      return;
    }

  mp = m->_mp_d;
  mpn_div_qr_invert (&minv, mp, mn);
  shift = minv.shift;

  if (shift > 0)
    {
      /* To avoid shifts, we do all our reductions, except the final
         one, using a *normalized* m. */
      minv.shift = 0;

      tp = gmp_xalloc_limbs (mn);
      gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
      mp = tp;
    }

  mpz_init (base);

  if (e->_mp_size < 0)
    {
      if (!mpz_invert (base, b, m))
        gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
    }
  else
    {
      mp_size_t bn;
      mpz_abs (base, b);

      bn = base->_mp_size;
      if (bn >= mn)
        {
          /* Reduce |b| mod the normalized m once up front. */
          mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
          bn = mn;
        }

      /* We have reduced the absolute value. Now take care of the
         sign. Note that we get zero represented non-canonically as
         m. */
      if (b->_mp_size < 0)
        {
          mp_ptr bp = MPZ_REALLOC (base, mn);
          gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
          bn = mn;
        }
      base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
    }
  mpz_init_set_ui (tr, 1);

  /* Square-and-multiply over the limbs of |e|, most significant limb
     first, reducing whenever the accumulator grows past mn limbs. */
  while (--en >= 0)
    {
      mp_limb_t w = e->_mp_d[en];
      mp_limb_t bit;

      bit = GMP_LIMB_HIGHBIT;
      do
        {
          mpz_mul (tr, tr, tr);
          if (w & bit)
            mpz_mul (tr, tr, base);
          if (tr->_mp_size > mn)
            {
              mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
              tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
            }
          bit >>= 1;
        }
      while (bit > 0);
    }

  /* Final reduction */
  if (tr->_mp_size >= mn)
    {
      /* Restore the real shift so the result is reduced mod the
         original (unnormalized) m. */
      minv.shift = shift;
      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
    }
  if (tp)
    gmp_free (tp);

  mpz_swap (r, tr);
  mpz_clear (tr);
  mpz_clear (base);
}
  2636. void
  2637. mpz_powm_ui (mpz_t r, const mpz_t b, uintptr_t elimb, const mpz_t m)
  2638. {
  2639. mpz_t e;
  2640. mpz_init_set_ui (e, elimb);
  2641. mpz_powm (r, b, e, m);
  2642. mpz_clear (e);
  2643. }
/* x=trunc(y^(1/z)), r=y-x^z.  Newton iteration starting from an upper
   bound (a power of two just above the root); the iteration decreases
   monotonically, and stops at the first non-decreasing step. */
void
mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, uintptr_t z)
{
  int sgn;
  mpz_t t, u;

  sgn = y->_mp_size < 0;
  /* Even root of a negative number is undefined (z even <=> low bit
     of z clear, hence the ~z test). */
  if ((~z & sgn) != 0)
    gmp_die ("mpz_rootrem: Negative argument, with even root.");
  if (z == 0)
    gmp_die ("mpz_rootrem: Zeroth root.");

  /* For |y| <= 1 the root is y itself and the remainder is zero. */
  if (mpz_cmpabs_ui (y, 1) <= 0) {
    if (x)
      mpz_set (x, y);
    if (r)
      r->_mp_size = 0;
    return;
  }

  mpz_init (u);
  mpz_init (t);
  /* Initial guess: 2^(bits(y)/z + 1) >= true root. */
  mpz_setbit (t, mpz_sizeinbase (y, 2) / z + 1);

  if (z == 2) /* simplify sqrt loop: z-1 == 1 */
    do {
      mpz_swap (u, t);			/* u = x */
      mpz_tdiv_q (t, y, u);		/* t = y/x */
      mpz_add (t, t, u);		/* t = y/x + x */
      mpz_tdiv_q_2exp (t, t, 1);	/* x'= (y/x + x)/2 */
    } while (mpz_cmpabs (t, u) < 0);	/* |x'| < |x| */
  else /* z != 2 */ {
    mpz_t v;

    mpz_init (v);
    /* For odd z a negative y is allowed; iterate on the negative
       guess directly. */
    if (sgn)
      mpz_neg (t, t);

    do {
      mpz_swap (u, t);			/* u = x */
      mpz_pow_ui (t, u, z - 1);		/* t = x^(z-1) */
      mpz_tdiv_q (t, y, t);		/* t = y/x^(z-1) */
      mpz_mul_ui (v, u, z - 1);		/* v = x*(z-1) */
      mpz_add (t, t, v);		/* t = y/x^(z-1) + x*(z-1) */
      mpz_tdiv_q_ui (t, t, z);		/* x'=(y/x^(z-1) + x*(z-1))/z */
    } while (mpz_cmpabs (t, u) < 0);	/* |x'| < |x| */

    mpz_clear (v);
  }

  if (r) {
    mpz_pow_ui (t, u, z);
    mpz_sub (r, y, t);
  }
  if (x)
    mpz_swap (x, u);
  mpz_clear (u);
  mpz_clear (t);
}
  2696. int
  2697. mpz_root (mpz_t x, const mpz_t y, uintptr_t z)
  2698. {
  2699. int res;
  2700. mpz_t r;
  2701. mpz_init (r);
  2702. mpz_rootrem (x, r, y, z);
  2703. res = r->_mp_size == 0;
  2704. mpz_clear (r);
  2705. return res;
  2706. }
/* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
void
mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
{
  mpz_rootrem (s, r, u, 2);
}
/* s = floor(sqrt(u)); the remainder is discarded. */
void
mpz_sqrt (mpz_t s, const mpz_t u)
{
  mpz_rootrem (s, NULL, u, 2);
}
/* Return non-zero iff u is a perfect square.  Zero counts as a
   square; negative numbers do not. */
int
mpz_perfect_square_p (const mpz_t u)
{
  if (u->_mp_size <= 0)
    return (u->_mp_size == 0);
  else
    return mpz_root (NULL, u, 2);
}
/* mpn-level perfect-square test: wraps the n-limb operand {p, n} in a
   read-only mpz and defers to mpz_root.  Requires a normalized,
   non-empty operand. */
int
mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
{
  mpz_t t;

  assert (n > 0);
  assert (p [n-1] != 0);
  return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2);
}
/* mpn-level sqrtrem: sp gets floor(sqrt({p, n})) ((n+1)/2 limbs), rp
   (if non-NULL) gets the remainder.  Returns the remainder's limb
   count.  Requires a normalized, non-empty operand. */
mp_size_t
mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
{
  mpz_t s, r, u;
  mp_size_t res;

  assert (n > 0);
  assert (p [n-1] != 0);

  mpz_init (r);
  mpz_init (s);
  mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2);

  /* The square root of an n-limb number always has (n+1)/2 limbs. */
  assert (s->_mp_size == (n+1)/2);
  mpn_copyd (sp, s->_mp_d, s->_mp_size);
  mpz_clear (s);
  res = r->_mp_size;
  if (rp)
    mpn_copyd (rp, r->_mp_d, res);
  mpz_clear (r);
  return res;
}
/* Combinatorics */

/* x = n!^(m) — the m-th multifactorial of n: the product
   n (n-m) (n-2m) ... down to at most m+1. */
void
mpz_mfac_uiui (mpz_t x, uintptr_t n, uintptr_t m)
{
  /* n + (n == 0) maps n == 0 to 1, since the empty product is 1. */
  mpz_set_ui (x, n + (n == 0));
  /* m + 1 < 2 catches both m == 0 and a wrapped m == UINTPTR_MAX. */
  if (m + 1 < 2) return;
  while (n > m + 1)
    mpz_mul_ui (x, x, n -= m);
}
/* x = n!! (double factorial). */
void
mpz_2fac_ui (mpz_t x, uintptr_t n)
{
  mpz_mfac_uiui (x, n, 2);
}

/* x = n! (factorial). */
void
mpz_fac_ui (mpz_t x, uintptr_t n)
{
  mpz_mfac_uiui (x, n, 1);
}
/* r = binomial (n, k) = n! / (k! (n-k)!); 0 when k > n. */
void
mpz_bin_uiui (mpz_t r, uintptr_t n, uintptr_t k)
{
  mpz_t t;

  /* Start from 1 when k <= n, 0 otherwise (the loop below then
     multiplies a zero). */
  mpz_set_ui (r, k <= n);
  /* Use the symmetry C(n, k) = C(n, n-k) to minimize the number of
     factors. */
  if (k > (n >> 1))
    k = (k <= n) ? n - k : 0;
  mpz_init (t);
  mpz_fac_ui (t, k);
  /* r = n (n-1) ... (n-k+1), then divide by k!. */
  for (; k > 0; --k)
    mpz_mul_ui (r, r, n--);
  mpz_divexact (r, r, t);
  mpz_clear (t);
}
/* Primality testing */

/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
static int
gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b)
{
  int c, bit = 0;

  assert (b & 1);
  assert (a != 0);
  /* assert (mpn_gcd_11 (a, b) == 1); */

  /* Below, we represent a and b shifted right so that the least
     significant one bit is implicit. */
  b >>= 1;

  gmp_ctz(c, a);
  a >>= 1;

  /* bit accumulates the sign in its low bit; each transformation
     below applies the corresponding reciprocity/squaring rule. */
  do
    {
      a >>= c;
      /* (2/b) = -1 if b = 3 or 5 mod 8 */
      bit ^= c & (b ^ (b >> 1));
      if (a < b)
        {
          /* Reciprocity: flips the sign iff both a and b are 3 mod 4
             (here tested on the shifted representations). */
          bit ^= a & b;
          a = b - a;
          b -= a;
        }
      else
        {
          a -= b;
          assert (a != 0);
        }

      gmp_ctz(c, a);
      ++c;
    }
  while (b > 0);

  return bit & 1 ? -1 : 1;
}
/* One doubling step of the Lucas sequence: given V = V_k and
   Qk = Q^k (mod n), compute V_{2k} and Q^{2k} (mod n). */
static void
gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n)
{
  mpz_mod (Qk, Qk, n);
  /* V_{2k} <- V_k ^ 2 - 2Q^k */
  mpz_mul (V, V, V);
  mpz_submul_ui (V, Qk, 2);
  mpz_tdiv_r (V, V, n);
  /* Q^{2k} = (Q^k)^2 */
  mpz_mul (Qk, Qk, Qk);
}
  2834. /* Computes V_k, Q^k (mod n) for the Lucas' sequence */
  2835. /* with P=1, Q=Q; k = (n>>b0)|1. */
  2836. /* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a intptr_t */
  2837. /* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
static int
gmp_lucas_mod (mpz_t V, mpz_t Qk, intptr_t Q,
	       mp_bitcnt_t b0, const mpz_t n)
{
  mp_bitcnt_t bs;
  mpz_t U;
  int res;

  assert (b0 > 0);
  /* -2*Q must not overflow an intptr_t; used by mpz_mul_si below. */
  assert (Q <= - (INTPTR_MIN / 2));
  assert (Q >= - (INTPTR_MAX / 2));
  assert (mpz_cmp_ui (n, 4) > 0);
  assert (mpz_odd_p (n));

  mpz_init_set_ui (U, 1); /* U1 = 1 */
  mpz_set_ui (V, 1); /* V1 = 1 */
  mpz_set_si (Qk, Q);

  /* Left-to-right binary chain for the index k = (n >> b0) | 1,
     scanning the bits of n from just below the most significant
     down to bit b0: double each round, then conditionally step. */
  for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;)
    {
      /* U_{2k} <- U_k * V_k */
      mpz_mul (U, U, V);
      /* V_{2k} <- V_k ^ 2 - 2Q^k */
      /* Q^{2k} = (Q^k)^2 */
      gmp_lucas_step_k_2k (V, Qk, n);

      /* A step k->k+1 is performed if the bit in $n$ is 1 */
      /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but */
      /* should be 1 in $n+1$ (bs == b0) */
      if (b0 == bs || mpz_tstbit (n, bs))
	{
	  /* Q^{k+1} <- Q^k * Q */
	  mpz_mul_si (Qk, Qk, Q);
	  /* U_{k+1} <- (U_k + V_k) / 2 */
	  mpz_swap (U, V); /* Keep in V the old value of U_k */
	  mpz_add (U, U, V);
	  /* We have to compute U/2, so we need an even value, */
	  /* equivalent (mod n) */
	  if (mpz_odd_p (U))
	    mpz_add (U, U, n);
	  mpz_tdiv_q_2exp (U, U, 1);
	  /* V_{k+1} <-(D*U_k + V_k) / 2 =
	     U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
	  mpz_mul_si (V, V, -2*Q);
	  mpz_add (V, U, V);
	  mpz_tdiv_r (V, V, n);
	}
      mpz_tdiv_r (U, U, n);
    }

  /* U_k == 0 (mod n)? */
  res = U->_mp_size == 0;
  mpz_clear (U);
  return res;
}
  2887. /* Performs strong Lucas' test on x, with parameters suggested */
  2888. /* for the BPSW test. Qk is only passed to recycle a variable. */
  2889. /* Requires GCD (x,6) = 1.*/
static int
gmp_stronglucas (const mpz_t x, mpz_t Qk)
{
  mp_bitcnt_t b0;
  mpz_t V, n;
  mp_limb_t maxD, D; /* The absolute value is stored. */
  intptr_t Q;
  mp_limb_t tl;

  /* Test on the absolute value. */
  mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size));

  assert (mpz_odd_p (n));
  /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
  if (mpz_root (Qk, n, 2))
    return 0; /* A square is composite. */

  /* Check Ds up to square root (in case, n is prime)
     or avoid overflows */
  maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX;

  D = 3;
  /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
  /* For those Ds we have (D/n) = (n/|D|) */
  do
    {
      /* D exhausted the search range: n is prime (2) if we checked
	 all candidates up to sqrt(n), else report probably-prime. */
      if (D >= maxD)
	return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */
      D += 2;
      tl = mpz_tdiv_ui (n, D);
      if (tl == 0)
	return 0;	/* D divides n: composite. */
    }
  while (gmp_jacobi_coprime (tl, D) == 1);

  mpz_init (V);

  /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
  b0 = mpz_scan0 (n, 0);

  /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
  Q = (D & 2) ? (intptr_t) (D >> 2) + 1 : -(intptr_t) (D >> 2);

  if (! gmp_lucas_mod (V, Qk, Q, b0, n))	/* If Ud != 0 */
    while (V->_mp_size != 0 && --b0 != 0)	/* while Vk != 0 */
      /* V <- V ^ 2 - 2Q^k */
      /* Q^{2k} = (Q^k)^2 */
      gmp_lucas_step_k_2k (V, Qk, n);

  mpz_clear (V);
  /* Strong Lucas probable prime iff U_d == 0 (b0 untouched, still
     nonzero) or some V_{d*2^r} == 0 (loop left b0 nonzero). */
  return (b0 != 0);
}
/* One Miller-Rabin round to the base initially stored in y, where
   nm1 = n - 1 = q * 2^k with q odd.  Returns 1 if n is a probable
   prime to this base, 0 if n is definitely composite. */
static int
gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
		 const mpz_t q, mp_bitcnt_t k)
{
  assert (k > 0);

  /* Caller must initialize y to the base. */
  mpz_powm (y, y, q, n);

  if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
    return 1;

  /* Square up to k-1 times, looking for y == n-1. */
  while (--k > 0)
    {
      mpz_powm_ui (y, y, 2, n);
      if (mpz_cmp (y, nm1) == 0)
	return 1;
      /* y == 1 means that the previous y was a non-trivial square root
	 of 1 (mod n). y == 0 means that n is a power of the base.
	 In either case, n is not prime. */
      if (mpz_cmp_ui (y, 1) <= 0)
	return 0;
    }
  return 0;
}
  2955. /* This product is 0xc0cfd797, and fits in 32 bits. */
  2956. #define GMP_PRIME_PRODUCT \
  2957. (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
  2958. /* Bit (p+1)/2 is set, for each odd prime <= 61 */
  2959. #define GMP_PRIME_MASK 0xc96996dcUL
/* Primality test: returns 2 when |n| is definitely prime (small
   cases), 1 when probably prime, 0 when composite.  Runs a BPSW
   test (Miller-Rabin base 2 plus a strong Lucas test), then up to
   reps-24 further Miller-Rabin rounds with deterministic bases. */
int
mpz_probab_prime_p (const mpz_t n, int reps)
{
  mpz_t nm1;
  mpz_t q;
  mpz_t y;
  mp_bitcnt_t k;
  int is_prime;
  int j;

  /* Note that we use the absolute value of n only, for compatibility
     with the real GMP. */
  if (mpz_even_p (n))
    return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;

  /* Above test excludes n == 0 */
  assert (n->_mp_size != 0);

  /* Odd |n| < 64: look the answer up in the prime bit mask. */
  if (mpz_cmpabs_ui (n, 64) < 0)
    return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;

  if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
    return 0;

  /* All prime factors are >= 31. */
  if (mpz_cmpabs_ui (n, 31*31) < 0)
    return 2;

  mpz_init (nm1);
  mpz_init (q);

  /* Find q and k, where q is odd and n = 1 + 2**k * q.  */
  mpz_abs (nm1, n);
  /* |n| is odd, so decrementing the low limb cannot borrow. */
  nm1->_mp_d[0] -= 1;
  k = mpz_scan1 (nm1, 0);
  mpz_tdiv_q_2exp (q, nm1, k);

  /* BPSW test */
  mpz_init_set_ui (y, 2);
  is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y);
  reps -= 24; /* skip the first 24 repetitions */

  /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
     j^2 + j + 41 using Euler's polynomial. We potentially stop early,
     if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
     30 (a[30] == 971 > 31*31 == 961). */
  /* Bitwise & is fine here: both operands are 0 or 1. */
  for (j = 0; is_prime & (j < reps); j++)
    {
      mpz_set_ui (y, (uintptr_t) j*j+j+41);
      if (mpz_cmp (y, nm1) >= 0)
	{
	  /* Don't try any further bases. This "early" break does not affect
	     the result for any reasonable reps value (<=5000 was tested) */
	  assert (j >= 30);
	  break;
	}
      is_prime = gmp_millerrabin (n, nm1, y, q, k);
    }
  mpz_clear (nm1);
  mpz_clear (q);
  mpz_clear (y);

  return is_prime;
}
  3014. /* Logical operations and bit manipulation. */
  3015. /* Numbers are treated as if represented in two's complement (and
  3016. infinitely sign extended). For a negative values we get the two's
  3017. complement from -x = ~x + 1, where ~ is bitwise complement.
  3018. Negation transforms
  3019. xxxx10...0
  3020. into
  3021. yyyy10...0
  3022. where yyyy is the bitwise complement of xxxx. So least significant
  3023. bits, up to and including the first one bit, are unchanged, and
  3024. the more significant bits are all complemented.
  3025. To change a bit from zero to one in a negative number, subtract the
  3026. corresponding power of two from the absolute value. This can never
  3027. underflow. To change a bit from one to zero, add the corresponding
  3028. power of two, and this might overflow. E.g., if x = -001111, the
  3029. two's complement is 110001. Clearing the least significant bit, we
  3030. get two's complement 110000, and -010000. */
/* Returns bit number bit_index of d (0 or 1), treating a negative d
   as an infinitely sign-extended two's complement number. */
int
mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t limb_index;
  unsigned shift;
  mp_size_t ds;
  mp_size_t dn;
  mp_limb_t w;
  int bit;

  ds = d->_mp_size;
  dn = GMP_ABS (ds);
  limb_index = bit_index / GMP_LIMB_BITS;
  /* Past the top limb, d behaves as its sign extension: all zeros
     for d >= 0, all ones for d < 0. */
  if (limb_index >= dn)
    return ds < 0;

  shift = bit_index % GMP_LIMB_BITS;
  w = d->_mp_d[limb_index];
  bit = (w >> shift) & 1;

  if (ds < 0)
    {
      /* d < 0. Check if any of the bits below is set: If so, our bit
	 must be complemented. */
      if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0)
	return bit ^ 1;
      while (--limb_index >= 0)
	if (d->_mp_d[limb_index] > 0)
	  return bit ^ 1;
    }
  return bit;
}
/* d <- sign(d) * (|d| + 2^bit_index).  Grows the limb array when the
   bit lies beyond the current most significant limb. */
static void
mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_limb_t bit;
  mp_ptr dp;

  dn = GMP_ABS (d->_mp_size);

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  if (limb_index >= dn)
    {
      mp_size_t i;
      /* The bit should be set outside of the end of the number.
	 We have to increase the size of the number. */
      dp = MPZ_REALLOC (d, limb_index + 1);

      dp[limb_index] = bit;
      /* Zero the gap between the old top limb and the new bit. */
      for (i = dn; i < limb_index; i++)
	dp[i] = 0;
      dn = limb_index + 1;
    }
  else
    {
      mp_limb_t cy;

      dp = d->_mp_d;

      /* Adding the bit may carry all the way past the top limb. */
      cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
      if (cy > 0)
	{
	  dp = MPZ_REALLOC (d, dn + 1);
	  dp[dn++] = cy;
	}
    }

  /* Preserve the original sign. */
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}
/* d <- sign(d) * (|d| - 2^bit_index).  The bit must lie within |d|
   and the subtraction must not underflow (asserted). */
static void
mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_ptr dp;
  mp_limb_t bit;

  dn = GMP_ABS (d->_mp_size);
  dp = d->_mp_d;

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  assert (limb_index < dn);

  gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
				 dn - limb_index, bit));
  /* The subtraction may have cleared the top limb(s). */
  dn = mpn_normalized_size (dp, dn);
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}
  3109. void
  3110. mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
  3111. {
  3112. if (!mpz_tstbit (d, bit_index))
  3113. {
  3114. if (d->_mp_size >= 0)
  3115. mpz_abs_add_bit (d, bit_index);
  3116. else
  3117. mpz_abs_sub_bit (d, bit_index);
  3118. }
  3119. }
  3120. void
  3121. mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
  3122. {
  3123. if (mpz_tstbit (d, bit_index))
  3124. {
  3125. if (d->_mp_size >= 0)
  3126. mpz_abs_sub_bit (d, bit_index);
  3127. else
  3128. mpz_abs_add_bit (d, bit_index);
  3129. }
  3130. }
  3131. void
  3132. mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
  3133. {
  3134. if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
  3135. mpz_abs_sub_bit (d, bit_index);
  3136. else
  3137. mpz_abs_add_bit (d, bit_index);
  3138. }
void
mpz_com (mpz_t r, const mpz_t u)
{
  /* One's complement: ~u = -(u + 1). */
  mpz_add_ui (r, u, 1);
  mpz_neg (r, r);
}
void
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Keep the longer operand in u. */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Operands are stored sign-magnitude, so two's complement is
     emulated on the fly: a negative operand has each limb
     complemented (xor with the all-ones mask ux/vx) and the +1
     carried limb to limb in uc/vc.  The result sign is the AND of
     the input signs, and result limbs are converted back via rx/rc
     the same way. */
  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc & vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is positive, higher limbs don't matter. */
  rn = vx ? un : vn;

  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;
      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;
      rl = ( (ul & vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u, ANDed with the sign extension vx of v. */
  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;
      rl = ( (ul & vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}
void
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Keep the longer operand in u. */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  /* Two's complement emulated on the fly: negative operands are
     complemented (masks ux/vx) with the +1 carried in uc/vc; the
     result sign is the OR of the input signs, converted back via
     rx/rc. */
  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc | vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is negative, by sign extension higher limbs
     don't matter. */
  rn = vx ? vn : un;

  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;
      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;
      rl = ( (ul | vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u, ORed with the sign extension vx of v. */
  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;
      rl = ( (ul | vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}
void
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Keep the longer operand in u. */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  /* Two's complement emulated on the fly: negative operands are
     complemented (masks ux/vx) with the +1 carried in uc/vc; the
     result sign is the XOR of the input signs, converted back via
     rx/rc. */
  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc ^ vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  rp = MPZ_REALLOC (r, un + (mp_size_t) rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;
      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;
      rl = (ul ^ vl ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u, XORed with the sign extension vx of v;
     note vx ^ rx == ux here, since rx == ux ^ vx. */
  for (; i < un; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;
      rl = (ul ^ ux) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[un++] = rc;
  else
    un = mpn_normalized_size (rp, un);

  r->_mp_size = rx ? -un : un;
}
  3318. static unsigned
  3319. gmp_popcount_limb (mp_limb_t x)
  3320. {
  3321. unsigned c;
  3322. /* Do 16 bits at a time, to avoid limb-sized constants. */
  3323. int LOCAL_SHIFT_BITS = 16;
  3324. for (c = 0; x > 0;)
  3325. {
  3326. unsigned w = x - ((x >> 1) & 0x5555);
  3327. w = ((w >> 2) & 0x3333) + (w & 0x3333);
  3328. w = (w >> 4) + w;
  3329. w = ((w >> 8) & 0x000f) + (w & 0x000f);
  3330. c += w;
  3331. if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)
  3332. x >>= LOCAL_SHIFT_BITS;
  3333. else
  3334. x = 0;
  3335. }
  3336. return c;
  3337. }
  3338. mp_bitcnt_t
  3339. mpn_popcount (mp_srcptr p, mp_size_t n)
  3340. {
  3341. mp_size_t i;
  3342. mp_bitcnt_t c;
  3343. for (c = 0, i = 0; i < n; i++)
  3344. c += gmp_popcount_limb (p[i]);
  3345. return c;
  3346. }
  3347. mp_bitcnt_t
  3348. mpz_popcount (const mpz_t u)
  3349. {
  3350. mp_size_t un;
  3351. un = u->_mp_size;
  3352. if (un < 0)
  3353. return ~(mp_bitcnt_t) 0;
  3354. return mpn_popcount (u->_mp_d, un);
  3355. }
/* Hamming distance: the number of bit positions in which u and v
   differ.  Operands of different sign differ in infinitely many
   positions under two's complement, reported as the maximum
   mp_bitcnt_t value. */
mp_bitcnt_t
mpz_hamdist (const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_limb_t uc, vc, ul, vl, comp;
  mp_srcptr up, vp;
  mp_bitcnt_t c;

  un = u->_mp_size;
  vn = v->_mp_size;

  if ( (un ^ vn) < 0)
    return ~(mp_bitcnt_t) 0;

  /* When both are negative, compare their two's complement forms:
     comp is the all-ones complement mask and uc/vc carry the +1
     limb by limb. */
  comp = - (uc = vc = (un < 0));
  if (uc)
    {
      assert (vn < 0);
      un = -un;
      vn = -vn;
    }

  up = u->_mp_d;
  vp = v->_mp_d;

  /* Keep the longer operand in up/un. */
  if (un < vn)
    MPN_SRCPTR_SWAP (up, un, vp, vn);

  for (i = 0, c = 0; i < vn; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ comp) + vc;
      vc = vl < vc;

      c += gmp_popcount_limb (ul ^ vl);
    }
  assert (vc == 0);

  /* Remaining limbs of the longer operand, compared against the
     sign extension (comp) of the shorter one. */
  for (; i < un; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      c += gmp_popcount_limb (ul ^ comp);
    }

  return c;
}
/* Index of the first 1 bit at or above starting_bit, under the
   two's complement view of u. */
mp_bitcnt_t
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux;

  us = u->_mp_size;
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
     for u<0. Notice this test picks up any u==0 too. */
  if (i >= un)
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);

  up = u->_mp_d;
  ux = 0;
  limb = up[i];

  /* starting_bit == 0 needs no adjustment even for u < 0, since the
     bits up to and including the lowest one bit agree between a
     value and its two's complement negation. */
  if (starting_bit != 0)
    {
      if (us < 0)
	{
	  /* Convert limb i to two's complement: the +1 carry reaches
	     it only when all lower limbs are zero.  ux becomes the
	     extension mask handed to mpn_common_scan for the limbs
	     above (NOTE(review): exact semantics are those of
	     mpn_common_scan, defined elsewhere in this file). */
	  ux = mpn_zero_p (up, i);
	  limb = ~ limb + ux;
	  ux = - (mp_limb_t) (limb >= ux);
	}

      /* Mask to 0 all bits before starting_bit, thus ignoring them. */
      limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
    }

  return mpn_common_scan (limb, i, up, un, ux);
}
/* Index of the first 0 bit at or above starting_bit, under the
   two's complement view of u. */
mp_bitcnt_t
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux;

  us = u->_mp_size;
  /* ux is all ones for u >= 0; xoring limbs with it turns the search
     for a 0 bit into a search for a 1 bit. */
  ux = - (mp_limb_t) (us >= 0);
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
     u<0. Notice this test picks up all cases of u==0 too. */
  if (i >= un)
    return (ux ? starting_bit : ~(mp_bitcnt_t) 0);

  up = u->_mp_d;
  limb = up[i] ^ ux;

  /* For u < 0, account for the two's complement +1 carry from the
     lower limbs. */
  if (ux == 0)
    limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */

  /* Mask all bits before starting_bit, thus ignoring them. */
  limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);

  return mpn_common_scan (limb, i, up, un, ux);
}
  3446. /* MPZ base conversion. */
/* Number of digits needed to represent |u| in the given base
   (2 <= base <= 62); returns 1 for u == 0.  Power-of-two bases are
   computed directly from the bit size, other bases by repeated
   division. */
size_t
mpz_sizeinbase (const mpz_t u, int base)
{
  mp_size_t un;
  mp_srcptr up;
  mp_ptr tp;
  mp_bitcnt_t bits;
  struct gmp_div_inverse bi;
  size_t ndigits;

  assert (base >= 2);
  assert (base <= 62);

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    return 1;	/* Zero prints as a single "0". */

  up = u->_mp_d;

  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
  switch (base)
    {
    case 2:
      return bits;
    case 4:
      return (bits + 1) / 2;
    case 8:
      return (bits + 2) / 3;
    case 16:
      return (bits + 3) / 4;
    case 32:
      return (bits + 4) / 5;
      /* FIXME: Do something more clever for the common case of base
	 10. */
    }

  /* General case: repeatedly divide a scratch copy of |u| by base,
     counting the steps until it reaches zero. */
  tp = gmp_xalloc_limbs (un);
  mpn_copyi (tp, up, un);
  mpn_div_qr_1_invert (&bi, base);

  ndigits = 0;
  do
    {
      ndigits++;
      mpn_div_qr_1_preinv (tp, tp, un, &bi);
      un -= (tp[un-1] == 0);
    }
  while (un > 0);

  gmp_free (tp);
  return ndigits;
}
/* Convert u to a NUL-terminated string in the given base, writing
   into sp or into a freshly allocated buffer when sp is NULL.
   Base conventions: 2..36 use lowercase letters, 37..62 the full
   upper-then-lower alphabet, -2..-36 uppercase letters, and
   -1/0/1 mean base 10.  Returns NULL for unsupported bases. */
char *
mpz_get_str (char *sp, int base, const mpz_t u)
{
  unsigned bits;
  const char *digits;
  mp_size_t un;
  size_t i, sn;

  digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
  if (base > 1)
    {
      if (base <= 36)
	digits = "0123456789abcdefghijklmnopqrstuvwxyz";
      else if (base > 62)
	return NULL;
    }
  else if (base >= -1)
    base = 10;
  else
    {
      base = -base;
      if (base > 36)
	return NULL;
    }

  sn = 1 + mpz_sizeinbase (u, base);
  if (!sp)
    /* Allocate room for digits, optional sign and the NUL. */
    sp = (char *) gmp_xalloc (1 + sn);

  un = GMP_ABS (u->_mp_size);

  if (un == 0)
    {
      sp[0] = '0';
      sp[1] = '\0';
      return sp;
    }

  i = 0;

  if (u->_mp_size < 0)
    sp[i++] = '-';

  bits = mpn_base_power_of_two_p (base);

  if (bits)
    /* Not modified in this case. */
    sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
  else
    {
      struct mpn_base_info info;
      mp_ptr tp;

      mpn_get_base_info (&info, base);
      /* mpn_get_str_other destroys its input, so work on a copy. */
      tp = gmp_xalloc_limbs (un);
      mpn_copyi (tp, u->_mp_d, un);

      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
      gmp_free (tp);
    }

  /* The helpers above store raw digit values; map them to ASCII. */
  for (; i < sn; i++)
    sp[i] = digits[(unsigned char) sp[i]];

  sp[sn] = '\0';
  return sp;
}
/* Parse the string sp as an integer in the given base (0 means
   auto-detect from a 0x/0b/0 prefix, default decimal).  Leading
   whitespace and an optional '-' are accepted; embedded whitespace
   is skipped.  Returns 0 on success, -1 on failure (r set to 0). */
int
mpz_set_str (mpz_t r, const char *sp, int base)
{
  unsigned bits, value_of_a;
  mp_size_t rn, alloc;
  mp_ptr rp;
  size_t dn;
  int sign;
  unsigned char *dp;

  assert (base == 0 || (base >= 2 && base <= 62));

  while (isspace( (unsigned char) *sp))
    sp++;

  sign = (*sp == '-');
  sp += sign;

  if (base == 0)
    {
      /* Detect the base from the prefix: 0x/0X hex, 0b/0B binary,
	 other leading 0 octal, else decimal. */
      if (sp[0] == '0')
	{
	  if (sp[1] == 'x' || sp[1] == 'X')
	    {
	      base = 16;
	      sp += 2;
	    }
	  else if (sp[1] == 'b' || sp[1] == 'B')
	    {
	      base = 2;
	      sp += 2;
	    }
	  else
	    base = 8;
	}
      else
	base = 10;
    }
  if (!*sp)
    {
      /* Empty digit string. */
      r->_mp_size = 0;
      return -1;
    }
  dp = (unsigned char *) gmp_xalloc (strlen (sp));

  /* For bases above 36, 'a'..'z' continue after 'Z' with values
     36..61; otherwise lowercase and uppercase are equivalent. */
  value_of_a = (base > 36) ? 36 : 10;
  for (dn = 0; *sp; sp++)
    {
      unsigned digit;

      if (isspace ((unsigned char) *sp))
	continue;
      else if (*sp >= '0' && *sp <= '9')
	digit = *sp - '0';
      else if (*sp >= 'a' && *sp <= 'z')
	digit = *sp - 'a' + value_of_a;
      else if (*sp >= 'A' && *sp <= 'Z')
	digit = *sp - 'A' + 10;
      else
	digit = base; /* fail */

      if (digit >= (unsigned) base)
	{
	  gmp_free (dp);
	  r->_mp_size = 0;
	  return -1;
	}

      dp[dn++] = digit;
    }

  if (!dn)
    {
      /* No digits at all (e.g. just a sign or prefix). */
      gmp_free (dp);
      r->_mp_size = 0;
      return -1;
    }
  bits = mpn_base_power_of_two_p (base);

  if (bits > 0)
    {
      alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_bits (rp, dp, dn, bits);
    }
  else
    {
      struct mpn_base_info info;
      mpn_get_base_info (&info, base);
      alloc = (dn + info.exp - 1) / info.exp;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_other (rp, dp, dn, base, &info);
      /* Normalization, needed for all-zero input. */
      assert (rn > 0);
      rn -= rp[rn-1] == 0;
    }
  assert (rn <= alloc);
  gmp_free (dp);

  r->_mp_size = sign ? - rn : rn;

  return 0;
}
/* Initialize r and parse sp into it; same return convention as
   mpz_set_str (0 on success, -1 on failure). */
int
mpz_init_set_str (mpz_t r, const char *sp, int base)
{
  mpz_init (r);
  return mpz_set_str (r, sp, base);
}
  3644. size_t
  3645. mpz_out_str (FILE *stream, int base, const mpz_t x)
  3646. {
  3647. char *str;
  3648. size_t len;
  3649. str = mpz_get_str (NULL, base, x);
  3650. if (!str)
  3651. return 0;
  3652. len = strlen (str);
  3653. len = fwrite (str, 1, len, stream);
  3654. gmp_free (str);
  3655. return len;
  3656. }
  3657. static int
  3658. gmp_detect_endian (void)
  3659. {
  3660. static const int i = 2;
  3661. const unsigned char *p = (const unsigned char *) &i;
  3662. return 1 - *p;
  3663. }
  3664. /* Import and export. Does not support nails. */
/* Set r to the non-negative integer formed by count words of size
   bytes each read from src.  order selects word order and endian
   byte order (1 = most significant first, -1 = least significant
   first, endian 0 = native).  Nails are not supported. */
void
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
	    size_t nails, const void *src)
{
  const unsigned char *p;
  ptrdiff_t word_step;
  mp_ptr rp;
  mp_size_t rn;

  /* The current (partial) limb. */
  mp_limb_t limb;
  /* The number of bytes already copied to this limb (starting from
     the low end). */
  size_t bytes;
  /* The index where the limb should be stored, when completed. */
  mp_size_t i;

  if (nails != 0)
    gmp_die ("mpz_import: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);

  if (endian == 0)
    endian = gmp_detect_endian ();

  p = (unsigned char *) src;

  /* When word order and byte scan direction disagree, stepping to
     the next word jumps two words' worth of bytes, compensating for
     the inner loop moving p backwards across the word. */
  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
  if (order == 1)
    {
      p += size * (count - 1);
      word_step = - word_step;
    }

  /* And at the least significant byte of that word. */
  if (endian == 1)
    p += (size - 1);

  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  rp = MPZ_REALLOC (r, rn);

  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
    {
      size_t j;
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
	{
	  /* Accumulate bytes into the current limb, low byte first. */
	  limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
	  if (bytes == sizeof(mp_limb_t))
	    {
	      rp[i++] = limb;
	      bytes = 0;
	      limb = 0;
	    }
	}
    }
  assert (i + (bytes > 0) == rn);
  if (limb != 0)
    rp[i++] = limb;
  else
    i = mpn_normalized_size (rp, i);

  r->_mp_size = i;
}
  3721. void *
  3722. mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
  3723. size_t nails, const mpz_t u)
  3724. {
  3725. size_t count;
  3726. mp_size_t un;
  3727. if (nails != 0)
  3728. gmp_die ("mpz_import: Nails not supported.");
  3729. assert (order == 1 || order == -1);
  3730. assert (endian >= -1 && endian <= 1);
  3731. assert (size > 0 || u->_mp_size == 0);
  3732. un = u->_mp_size;
  3733. count = 0;
  3734. if (un != 0)
  3735. {
  3736. size_t k;
  3737. unsigned char *p;
  3738. ptrdiff_t word_step;
  3739. /* The current (partial) limb. */
  3740. mp_limb_t limb;
  3741. /* The number of bytes left to do in this limb. */
  3742. size_t bytes;
  3743. /* The index where the limb was read. */
  3744. mp_size_t i;
  3745. un = GMP_ABS (un);
  3746. /* Count bytes in top limb. */
  3747. limb = u->_mp_d[un-1];
  3748. assert (limb != 0);
  3749. k = (GMP_LIMB_BITS <= CHAR_BIT);
  3750. if (!k)
  3751. {
  3752. do {
  3753. int LOCAL_CHAR_BIT = CHAR_BIT;
  3754. k++; limb >>= LOCAL_CHAR_BIT;
  3755. } while (limb != 0);
  3756. }
  3757. /* else limb = 0; */
  3758. count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
  3759. if (!r)
  3760. r = gmp_xalloc (count * size);
  3761. if (endian == 0)
  3762. endian = gmp_detect_endian ();
  3763. p = (unsigned char *) r;
  3764. word_step = (order != endian) ? 2 * size : 0;
  3765. /* Process bytes from the least significant end, so point p at the
  3766. least significant word. */
  3767. if (order == 1)
  3768. {
  3769. p += size * (count - 1);
  3770. word_step = - word_step;
  3771. }
  3772. /* And at least significant byte of that word. */
  3773. if (endian == 1)
  3774. p += (size - 1);
  3775. for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
  3776. {
  3777. size_t j;
  3778. for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian)
  3779. {
  3780. if (sizeof (mp_limb_t) == 1)
  3781. {
  3782. if (i < un)
  3783. *p = u->_mp_d[i++];
  3784. else
  3785. *p = 0;
  3786. }
  3787. else
  3788. {
  3789. int LOCAL_CHAR_BIT = CHAR_BIT;
  3790. if (bytes == 0)
  3791. {
  3792. if (i < un)
  3793. limb = u->_mp_d[i++];
  3794. bytes = sizeof (mp_limb_t);
  3795. }
  3796. *p = limb;
  3797. limb >>= LOCAL_CHAR_BIT;
  3798. bytes--;
  3799. }
  3800. }
  3801. }
  3802. assert (i == un);
  3803. assert (k == count);
  3804. }
  3805. if (countp)
  3806. *countp = count;
  3807. return r;
  3808. }