/* os_dep.c */
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
  5. * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
  6. *
  7. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  8. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  9. *
  10. * Permission is hereby granted to use or copy this program
  11. * for any purpose, provided the above notices are retained on all copies.
  12. * Permission to modify the code and to distribute modified code is granted,
  13. * provided the above notices are retained, and a notice that the code was
  14. * modified is included with the above copyright notice.
  15. */
  16. #include "private/gc_priv.h"
  17. #if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
  18. && !defined(MSWINCE) && !defined(SN_TARGET_ORBIS) \
  19. && !defined(SN_TARGET_PSP2) && !defined(__CC_ARM)
  20. # include <sys/types.h>
  21. # if !defined(MSWIN32) && !defined(MSWIN_XBOX1)
  22. # include <unistd.h>
  23. # endif
  24. #endif
  25. #include <stdio.h>
  26. #if defined(MSWINCE) || defined(SN_TARGET_PS3)
  27. # define SIGSEGV 0 /* value is irrelevant */
  28. #else
  29. # include <signal.h>
  30. #endif
  31. #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(NACL) \
  32. || defined(SYMBIAN)
  33. # include <fcntl.h>
  34. #endif
  35. #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
  36. # include <ctype.h>
  37. #endif
  38. /* Blatantly OS dependent routines, except for those that are related */
  39. /* to dynamic loading. */
  40. #ifdef AMIGA
  41. # define GC_AMIGA_DEF
  42. # include "extra/AmigaOS.c"
  43. # undef GC_AMIGA_DEF
  44. #endif
  45. #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
  46. # ifndef WIN32_LEAN_AND_MEAN
  47. # define WIN32_LEAN_AND_MEAN 1
  48. # endif
  49. # define NOSERVICE
  50. # include <windows.h>
  51. /* It's not clear this is completely kosher under Cygwin. But it */
  52. /* allows us to get a working GC_get_stack_base. */
  53. #endif
  54. #ifdef MACOS
  55. # include <Processes.h>
  56. #endif
  57. #ifdef IRIX5
  58. # include <sys/uio.h>
  59. # include <malloc.h> /* for locking */
  60. #endif
  61. #if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
  62. # if defined(USE_MUNMAP) && !defined(USE_MMAP) && !defined(CPPCHECK)
  63. # error "invalid config - USE_MUNMAP requires USE_MMAP"
  64. # endif
  65. # include <sys/types.h>
  66. # include <sys/mman.h>
  67. # include <sys/stat.h>
  68. # include <errno.h>
  69. #endif
  70. #ifdef DARWIN
  71. /* for get_etext and friends */
  72. # include <mach-o/getsect.h>
  73. #endif
  74. #ifdef DJGPP
  75. /* Apparently necessary for djgpp 2.01. May cause problems with */
  76. /* other versions. */
  77. typedef long unsigned int caddr_t;
  78. #endif
  79. #ifdef PCR
  80. # include "il/PCR_IL.h"
  81. # include "th/PCR_ThCtl.h"
  82. # include "mm/PCR_MM.h"
  83. #endif
  84. #if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
  85. /* Declare GC_mprotect_stop and GC_mprotect_resume as extern "C". */
  86. # include "private/darwin_stop_world.h"
  87. #endif
  88. #if !defined(NO_EXECUTE_PERMISSION)
  89. STATIC GC_bool GC_pages_executable = TRUE;
  90. #else
  91. STATIC GC_bool GC_pages_executable = FALSE;
  92. #endif
  93. #define IGNORE_PAGES_EXECUTABLE 1
  94. /* Undefined on GC_pages_executable real use. */
  95. #ifdef NEED_PROC_MAPS
  96. /* We need to parse /proc/self/maps, either to find dynamic libraries, */
  97. /* and/or to find the register backing store base (IA64). Do it once */
  98. /* here. */
  99. #define READ read
  100. /* Repeatedly perform a read call until the buffer is filled or */
  101. /* we encounter EOF. */
  102. STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
  103. {
  104. size_t num_read = 0;
  105. ASSERT_CANCEL_DISABLED();
  106. while (num_read < count) {
  107. ssize_t result = READ(fd, buf + num_read, count - num_read);
  108. if (result < 0) return result;
  109. if (result == 0) break;
  110. num_read += result;
  111. }
  112. return num_read;
  113. }
  114. #ifdef THREADS
  115. /* Determine the length of a file by incrementally reading it into a */
  116. /* buffer. This would be silly to use it on a file supporting lseek, */
  117. /* but Linux /proc files usually do not. */
  118. STATIC size_t GC_get_file_len(int f)
  119. {
  120. size_t total = 0;
  121. ssize_t result;
  122. # define GET_FILE_LEN_BUF_SZ 500
  123. char buf[GET_FILE_LEN_BUF_SZ];
  124. do {
  125. result = read(f, buf, GET_FILE_LEN_BUF_SZ);
  126. if (result == -1) return 0;
  127. total += result;
  128. } while (result > 0);
  129. return total;
  130. }
  131. STATIC size_t GC_get_maps_len(void)
  132. {
  133. int f = open("/proc/self/maps", O_RDONLY);
  134. size_t result;
  135. if (f < 0) return 0; /* treat missing file as empty */
  136. result = GC_get_file_len(f);
  137. close(f);
  138. return result;
  139. }
  140. #endif /* THREADS */
  141. /* Copy the contents of /proc/self/maps to a buffer in our address */
  142. /* space. Return the address of the buffer, or zero on failure. */
  143. /* This code could be simplified if we could determine its size ahead */
  144. /* of time. */
/* Copy the contents of /proc/self/maps to a buffer in our address  */
/* space.  Return the address of the buffer (NUL-terminated), or    */
/* zero on failure.  The buffer is static and reused across calls,  */
/* so the caller must hold the allocation lock and must not rely    */
/* on the contents surviving a subsequent call.                     */
GC_INNER char * GC_get_maps(void)
{
    ssize_t result;
    static char *maps_buf = NULL;
    static size_t maps_buf_sz = 1;
    size_t maps_size, old_maps_size = 0;

    /* The buffer is essentially static, so there must be a single client. */
    GC_ASSERT(I_HOLD_LOCK());

    /* Note that in the presence of threads, the maps file can      */
    /* essentially shrink asynchronously and unexpectedly as        */
    /* threads that we already think of as dead release their       */
    /* stacks.  And there is no easy way to read the entire         */
    /* file atomically.  This is arguably a misfeature of the       */
    /* /proc/.../maps interface.                                    */
    /* Since we expect the file can grow asynchronously in rare     */
    /* cases, it should suffice to first determine                  */
    /* the size (using lseek or read), and then to reread the       */
    /* file.  If the size is inconsistent we have to retry.         */
    /* This only matters with threads enabled, and if we use        */
    /* this to locate roots (not the default).                      */

#   ifdef THREADS
        /* Determine the initial size of /proc/self/maps.           */
        /* Note that lseek doesn't work, at least as of 2.6.15.     */
        maps_size = GC_get_maps_len();
        if (0 == maps_size) return 0;
#   else
        maps_size = 4000;   /* Guess */
#   endif

    /* Read /proc/self/maps, growing maps_buf as necessary.         */
    /* Note that we may not allocate conventionally, and            */
    /* thus can't use stdio.                                        */
    do {
        int f;

        /* Ensure the buffer can hold maps_size bytes plus a NUL.   */
        while (maps_size >= maps_buf_sz) {
            GC_scratch_recycle_no_gww(maps_buf, maps_buf_sz);
            /* Grow only by powers of 2, since we leak "too small" buffers.*/
            while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
            maps_buf = GC_scratch_alloc(maps_buf_sz);
#           ifdef THREADS
                /* Recompute initial length, since we allocated.    */
                /* This can only happen a few times per program     */
                /* execution.                                       */
                maps_size = GC_get_maps_len();
                if (0 == maps_size) return 0;
#           endif
            if (maps_buf == 0) return 0;
        }
        GC_ASSERT(maps_buf_sz >= maps_size + 1);
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) return 0;
#       ifdef THREADS
            old_maps_size = maps_size;
#       endif
        maps_size = 0;
        /* Fill the buffer; a read of exactly maps_buf_sz-1 bytes   */
        /* means the file may be larger than the buffer, so keep    */
        /* reading (and later retry with a bigger buffer).          */
        do {
            result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
            if (result <= 0)
                break;
            maps_size += result;
        } while ((size_t)result == maps_buf_sz-1);
        close(f);
        if (result <= 0)
            return 0;
#       ifdef THREADS
            if (maps_size > old_maps_size) {
                /* This might be caused by e.g. thread creation.    */
                WARN("Unexpected asynchronous /proc/self/maps growth"
                     " (to %" WARN_PRIdPTR " bytes)\n", maps_size);
            }
#       endif
        /* Retry if the file outgrew the buffer, or (with threads)  */
        /* shrank since we measured it.                             */
    } while (maps_size >= maps_buf_sz || maps_size < old_maps_size);
                /* In the single-threaded case, the second clause is false. */
    maps_buf[maps_size] = '\0';
    return maps_buf;
}
  220. /*
  221. * GC_parse_map_entry parses an entry from /proc/self/maps so we can
  222. * locate all writable data segments that belong to shared libraries.
  223. * The format of one of these entries and the fields we care about
  224. * is as follows:
  225. * XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
  226. * ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
  227. * start end prot maj_dev
  228. *
  229. * Note that since about august 2003 kernels, the columns no longer have
  230. * fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets
  231. * anywhere, which is safer anyway.
  232. */
  233. /* Assign various fields of the first line in buf_ptr to (*start), */
  234. /* (*end), (*prot), (*maj_dev) and (*mapping_name). mapping_name may */
  235. /* be NULL. (*prot) and (*mapping_name) are assigned pointers into the */
  236. /* original buffer. */
  237. #if (defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)) \
  238. || defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
  239. || defined(REDIRECT_MALLOC)
/* Parse one line of /proc/self/maps pointed to by buf_ptr.         */
/* Assigns the start/end addresses, a pointer into the buffer at    */
/* the protection field, the major device number, and (if           */
/* mapping_name is non-NULL) a pointer into the buffer at the       */
/* mapping-name field.  Returns a pointer just past the parsed      */
/* line (suitable for the next call), or NULL at end of buffer.     */
/* The GC_ASSERTs document the expected cursor position after each  */
/* field; the layout is validated only in debug builds.             */
GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
                                  char **prot, unsigned int *maj_dev,
                                  char **mapping_name)
{
    unsigned char *start_start, *end_start, *maj_dev_start;
    unsigned char *p; /* unsigned for isspace, isxdigit */

    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    p = (unsigned char *)buf_ptr;
    /* "start-end" address range, both hexadecimal.                 */
    while (isspace(*p)) ++p;
    start_start = p;
    GC_ASSERT(isxdigit(*start_start));
    *start = (ptr_t)strtoul((char *)start_start, (char **)&p, 16);
    GC_ASSERT(*p=='-');
    ++p;
    end_start = p;
    GC_ASSERT(isxdigit(*end_start));
    *end = (ptr_t)strtoul((char *)end_start, (char **)&p, 16);
    GC_ASSERT(isspace(*p));
    /* Protection field, e.g. "r-xp"; returned as a pointer into    */
    /* the buffer, not copied.                                      */
    while (isspace(*p)) ++p;
    GC_ASSERT(*p == 'r' || *p == '-');
    *prot = (char *)p;
    /* Skip past protection field to offset field */
    while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
    GC_ASSERT(isxdigit(*p));
    /* Skip past offset field, which we ignore */
    while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
    maj_dev_start = p;
    GC_ASSERT(isxdigit(*maj_dev_start));
    *maj_dev = strtoul((char *)maj_dev_start, NULL, 16);
    if (mapping_name == 0) {
      /* Caller does not want the name: just advance past the line. */
      while (*p && *p++ != '\n');
    } else {
      /* The name starts at '/' (file) or '[' (pseudo-mapping).     */
      while (*p && *p != '\n' && *p != '/' && *p != '[') p++;
      *mapping_name = (char *)p;
      while (*p && *p++ != '\n');
    }
    return (char *)p;
}
  280. #endif /* REDIRECT_MALLOC || DYNAMIC_LOADING || IA64 || ... */
  281. #if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
  282. /* Try to read the backing store base from /proc/self/maps. */
  283. /* Return the bounds of the writable mapping with a 0 major device, */
  284. /* which includes the address passed as data. */
  285. /* Return FALSE if there is no such mapping. */
  286. GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp,
  287. ptr_t *endp)
  288. {
  289. char *prot;
  290. ptr_t my_start, my_end;
  291. unsigned int maj_dev;
  292. char *maps = GC_get_maps();
  293. char *buf_ptr = maps;
  294. if (0 == maps) return(FALSE);
  295. for (;;) {
  296. buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
  297. &prot, &maj_dev, 0);
  298. if (buf_ptr == NULL) return FALSE;
  299. if (prot[1] == 'w' && maj_dev == 0) {
  300. if ((word)my_end > (word)addr && (word)my_start <= (word)addr) {
  301. *startp = my_start;
  302. *endp = my_end;
  303. return TRUE;
  304. }
  305. }
  306. }
  307. return FALSE;
  308. }
  309. #endif /* IA64 || INCLUDE_LINUX_THREAD_DESCR */
  310. #if defined(REDIRECT_MALLOC)
  311. /* Find the text(code) mapping for the library whose name, after */
  312. /* stripping the directory part, starts with nm. */
  313. GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
  314. {
  315. size_t nm_len = strlen(nm);
  316. char *prot;
  317. char *map_path;
  318. ptr_t my_start, my_end;
  319. unsigned int maj_dev;
  320. char *maps = GC_get_maps();
  321. char *buf_ptr = maps;
  322. if (0 == maps) return(FALSE);
  323. for (;;) {
  324. buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
  325. &prot, &maj_dev, &map_path);
  326. if (buf_ptr == NULL) return FALSE;
  327. if (prot[0] == 'r' && prot[1] == '-' && prot[2] == 'x') {
  328. char *p = map_path;
  329. /* Set p to point just past last slash, if any. */
  330. while (*p != '\0' && *p != '\n' && *p != ' ' && *p != '\t') ++p;
  331. while (*p != '/' && (word)p >= (word)map_path) --p;
  332. ++p;
  333. if (strncmp(nm, p, nm_len) == 0) {
  334. *startp = my_start;
  335. *endp = my_end;
  336. return TRUE;
  337. }
  338. }
  339. }
  340. return FALSE;
  341. }
  342. #endif /* REDIRECT_MALLOC */
  343. #ifdef IA64
  344. static ptr_t backing_store_base_from_proc(void)
  345. {
  346. ptr_t my_start, my_end;
  347. if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
  348. GC_COND_LOG_PRINTF("Failed to find backing store base from /proc\n");
  349. return 0;
  350. }
  351. return my_start;
  352. }
  353. #endif
  354. #endif /* NEED_PROC_MAPS */
  355. #if defined(SEARCH_FOR_DATA_START)
  356. /* The I386 case can be handled without a search. The Alpha case */
  357. /* used to be handled differently as well, but the rules changed */
  358. /* for recent Linux versions. This seems to be the easiest way to */
  359. /* cover all versions. */
  360. # if defined(LINUX) || defined(HURD)
  361. /* Some Linux distributions arrange to define __data_start. Some */
  362. /* define data_start as a weak symbol. The latter is technically */
  363. /* broken, since the user program may define data_start, in which */
  364. /* case we lose. Nonetheless, we try both, preferring __data_start.*/
  365. /* We assume gcc-compatible pragmas. */
  366. EXTERN_C_BEGIN
  367. # pragma weak __data_start
  368. # pragma weak data_start
  369. extern int __data_start[], data_start[];
  370. # ifdef HOST_ANDROID
  371. # pragma weak _etext
  372. # pragma weak __dso_handle
  373. extern int _etext[], __dso_handle[];
  374. # endif
  375. EXTERN_C_END
  376. # endif /* LINUX */
  377. ptr_t GC_data_start = NULL;
/* Initialize GC_data_start, the beginning of the main data segment. */
/* Tries linker-provided symbols first, then falls back to probing.  */
GC_INNER void GC_init_linux_data_start(void)
{
  ptr_t data_end = DATAEND;

# if (defined(LINUX) || defined(HURD)) && !defined(IGNORE_PROG_DATA_START)
    /* Try the easy approaches first: */
#   ifdef HOST_ANDROID
      /* Workaround for "gold" (default) linker (as of Android NDK r10e). */
      /* If the symbols appear in this unexpected order, __data_start     */
      /* cannot be trusted and __dso_handle is used instead.              */
      if ((word)__data_start < (word)_etext
          && (word)_etext < (word)__dso_handle) {
        GC_data_start = (ptr_t)(__dso_handle);
#       ifdef DEBUG_ADD_DEL_ROOTS
          GC_log_printf(
              "__data_start is wrong; using __dso_handle as data start\n");
#       endif
      } else
#   endif
    /* else */ if (COVERT_DATAFLOW(__data_start) != 0) {
      /* Prefer __data_start; the weak data_start may collide with a  */
      /* user-defined symbol of the same name.                        */
      GC_data_start = (ptr_t)(__data_start);
    } else {
      GC_data_start = (ptr_t)(data_start);
    }
    if (COVERT_DATAFLOW(GC_data_start) != 0) {
      /* Sanity check: the data segment must not start past its end. */
      if ((word)GC_data_start > (word)data_end)
        ABORT_ARG2("Wrong __data_start/_end pair",
                   ": %p .. %p", (void *)GC_data_start, (void *)data_end);
      return;
    }
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("__data_start not provided\n");
#   endif
# endif /* LINUX */

  if (GC_no_dls) {
    /* Not needed, avoids the SIGSEGV caused by       */
    /* GC_find_limit which complicates debugging.     */
    GC_data_start = data_end; /* set data root size to 0 */
    return;
  }

  /* Last resort: probe backwards from the end of the data segment. */
  GC_data_start = GC_find_limit(data_end, FALSE);
}
  417. #endif /* SEARCH_FOR_DATA_START */
  418. #ifdef ECOS
  419. # ifndef ECOS_GC_MEMORY_SIZE
  420. # define ECOS_GC_MEMORY_SIZE (448 * 1024)
  421. # endif /* ECOS_GC_MEMORY_SIZE */
  422. /* FIXME: This is a simple way of allocating memory which is */
  423. /* compatible with ECOS early releases. Later releases use a more */
  424. /* sophisticated means of allocating memory than this simple static */
  425. /* allocator, but this method is at least bound to work. */
  426. static char ecos_gc_memory[ECOS_GC_MEMORY_SIZE];
  427. static char *ecos_gc_brk = ecos_gc_memory;
  428. static void *tiny_sbrk(ptrdiff_t increment)
  429. {
  430. void *p = ecos_gc_brk;
  431. ecos_gc_brk += increment;
  432. if ((word)ecos_gc_brk > (word)(ecos_gc_memory + sizeof(ecos_gc_memory))) {
  433. ecos_gc_brk -= increment;
  434. return NULL;
  435. }
  436. return p;
  437. }
  438. # define sbrk tiny_sbrk
  439. #endif /* ECOS */
  440. #if defined(NETBSD) && defined(__ELF__)
  441. ptr_t GC_data_start = NULL;
  442. EXTERN_C_BEGIN
  443. extern char **environ;
  444. EXTERN_C_END
/* Initialize GC_data_start by probing backwards from the address of */
/* the environ pointer, which resides in the data segment.           */
GC_INNER void GC_init_netbsd_elf(void)
{
  /* This may need to be environ, without the underscore, for */
  /* some versions.                                           */
  GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
}
  451. #endif /* NETBSD */
  452. #if defined(ADDRESS_SANITIZER) && (defined(UNIX_LIKE) \
  453. || defined(NEED_FIND_LIMIT) || defined(MPROTECT_VDB)) \
  454. && !defined(CUSTOM_ASAN_DEF_OPTIONS)
  455. /* To tell ASan to allow GC to use its own SIGBUS/SEGV handlers. */
  456. /* The function is exported just to be visible to ASan library. */
GC_API const char *__asan_default_options(void)
{
  /* Tell ASan not to override the collector's own SIGSEGV/SIGBUS */
  /* handlers (installed for memory probing and MPROTECT_VDB).    */
  return "allow_user_segv_handler=1";
}
  461. #endif
  462. #ifdef OPENBSD
  463. static struct sigaction old_segv_act;
  464. STATIC JMP_BUF GC_jmp_buf_openbsd;
  465. # ifdef THREADS
  466. # include <sys/syscall.h>
  467. EXTERN_C_BEGIN
  468. extern sigset_t __syscall(quad_t, ...);
  469. EXTERN_C_END
  470. # endif
  471. /* Don't use GC_find_limit() because siglongjmp() outside of the */
  472. /* signal handler by-passes our userland pthreads lib, leaving */
  473. /* SIGSEGV and SIGPROF masked. Instead, use this custom one that */
  474. /* works-around the issues. */
/* SIGSEGV handler used while probing: abandon the faulting access  */
/* by jumping back to the SETJMP point in the probing function.     */
STATIC void GC_fault_handler_openbsd(int sig GC_ATTR_UNUSED)
{
  LONGJMP(GC_jmp_buf_openbsd, 1);
}
  479. /* Return the first non-addressable location > p or bound. */
  480. /* Requires the allocation lock. */
STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
{
  static volatile ptr_t result;
           /* Safer if static, since otherwise it may not be   */
           /* preserved across the longjmp.  Can safely be     */
           /* static since it's only called with the           */
           /* allocation lock held.                            */
  struct sigaction act;
  word pgsz = (word)sysconf(_SC_PAGESIZE);

  GC_ASSERT((word)bound >= pgsz);
  GC_ASSERT(I_HOLD_LOCK());

  /* Install a temporary SIGSEGV handler that longjmps back here. */
  act.sa_handler = GC_fault_handler_openbsd;
  sigemptyset(&act.sa_mask);
  act.sa_flags = SA_NODEFER | SA_RESTART;
  /* act.sa_restorer is deprecated and should not be initialized. */
  sigaction(SIGSEGV, &act, &old_segv_act);

  if (SETJMP(GC_jmp_buf_openbsd) == 0) {
    /* Probe one page at a time starting at p rounded down to a    */
    /* page boundary; the first faulting read longjmps out of the  */
    /* loop, leaving result at the first non-addressable page.     */
    result = (ptr_t)((word)p & ~(pgsz-1));
    for (;;) {
      if ((word)result >= (word)bound - pgsz) {
        result = bound;
        break;
      }
      result += pgsz; /* no overflow expected */
      GC_noop1((word)(*result));
    }
  }

# ifdef THREADS
    /* Due to the siglongjump we need to manually unmask SIGPROF. */
    __syscall(SYS_sigprocmask, SIG_UNBLOCK, sigmask(SIGPROF));
# endif

  sigaction(SIGSEGV, &old_segv_act, 0);
  return(result);
}
  515. /* Return first addressable location > p or bound. */
  516. /* Requires the allocation lock. */
STATIC ptr_t GC_skip_hole_openbsd(ptr_t p, ptr_t bound)
{
  static volatile ptr_t result;
  static volatile int firstpass;
           /* Both are static so they survive the longjmp; safe   */
           /* because the caller holds the allocation lock.       */
  struct sigaction act;
  word pgsz = (word)sysconf(_SC_PAGESIZE);

  GC_ASSERT((word)bound >= pgsz);
  GC_ASSERT(I_HOLD_LOCK());

  act.sa_handler = GC_fault_handler_openbsd;
  sigemptyset(&act.sa_mask);
  act.sa_flags = SA_NODEFER | SA_RESTART;
  /* act.sa_restorer is deprecated and should not be initialized. */
  sigaction(SIGSEGV, &act, &old_segv_act);

  firstpass = 1;
  result = (ptr_t)((word)p & ~(pgsz-1));
  /* Each faulting probe longjmps back here and advances result by  */
  /* one page; the loop ends at the first readable page (the probe  */
  /* does not fault) or at bound.                                   */
  if (SETJMP(GC_jmp_buf_openbsd) != 0 || firstpass) {
    firstpass = 0;
    if ((word)result >= (word)bound - pgsz) {
      result = bound;
    } else {
      result += pgsz; /* no overflow expected */
      GC_noop1((word)(*result));
    }
  }

  sigaction(SIGSEGV, &old_segv_act, 0);
  return(result);
}
  544. #endif /* OPENBSD */
  545. # ifdef OS2
  546. # include <stddef.h>
  547. # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
/* Old-style DOS EXE header; only the magic number and the file     */
/* offset of the new-style (LX) header are of interest here.        */
struct exe_hdr {
    unsigned short magic_number;
    unsigned short padding[29];
    long new_exe_offset;
};
#define E_MAGIC(x) (x).magic_number
#define EMAGIC 0x5A4D                   /* "MZ" */
#define E_LFANEW(x) (x).new_exe_offset

/* Prefix of the OS/2 LX (32-bit linear executable) header; fields  */
/* beyond object_count are not used and are left as padding.        */
struct e32_exe {
    unsigned char magic_number[2];
    unsigned char byte_order;
    unsigned char word_order;
    unsigned long exe_format_level;
    unsigned short cpu;
    unsigned short os;
    unsigned long padding1[13];
    unsigned long object_table_offset;
    unsigned long object_count;
    unsigned long padding2[31];
};
#define E32_MAGIC1(x) (x).magic_number[0]
#define E32MAGIC1 'L'
#define E32_MAGIC2(x) (x).magic_number[1]
#define E32MAGIC2 'X'
#define E32_BORDER(x) (x).byte_order
#define E32LEBO 0                       /* little-endian byte order */
#define E32_WORDER(x) (x).word_order
#define E32LEWO 0                       /* little-endian word order */
#define E32_CPU(x) (x).cpu
#define E32CPU286 1
#define E32_OBJTAB(x) (x).object_table_offset
#define E32_OBJCNT(x) (x).object_count

/* One entry of the LX object (segment) table. */
struct o32_obj {
    unsigned long size;
    unsigned long base;
    unsigned long flags;
    unsigned long pagemap;
    unsigned long mapsize;
    unsigned long reserved;
};
#define O32_FLAGS(x) (x).flags
#define OBJREAD 0x0001L
#define OBJWRITE 0x0002L
#define OBJINVALID 0x0080L
#define O32_SIZE(x) (x).size
#define O32_BASE(x) (x).base
  594. # else /* IBM's compiler */
  595. /* A kludge to get around what appears to be a header file bug */
  596. # ifndef WORD
  597. # define WORD unsigned short
  598. # endif
  599. # ifndef DWORD
  600. # define DWORD unsigned long
  601. # endif
  602. # define EXE386 1
  603. # include <newexe.h>
  604. # include <exe386.h>
  605. # endif /* __IBMC__ */
  606. # define INCL_DOSEXCEPTIONS
  607. # define INCL_DOSPROCESS
  608. # define INCL_DOSERRORS
  609. # define INCL_DOSMODULEMGR
  610. # define INCL_DOSMEMMGR
  611. # include <os2.h>
  612. # endif /* OS/2 */
  613. /* Find the page size */
  614. GC_INNER size_t GC_page_size = 0;
  615. #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
  616. # ifndef VER_PLATFORM_WIN32_CE
  617. # define VER_PLATFORM_WIN32_CE 3
  618. # endif
  619. # if defined(MSWINCE) && defined(THREADS)
  620. GC_INNER GC_bool GC_dont_query_stack_min = FALSE;
  621. # endif
  622. GC_INNER SYSTEM_INFO GC_sysinfo;
/* Initialize GC_page_size and GC_sysinfo (Windows/Cygwin variant), */
/* applying WinCE-specific address-space and stack-query quirks.    */
GC_INNER void GC_setpagesize(void)
{
  GetSystemInfo(&GC_sysinfo);
# if defined(CYGWIN32) && defined(USE_MUNMAP)
    /* Allocations made with mmap() are aligned to the allocation   */
    /* granularity, which (at least on 64-bit Windows OS) is not the */
    /* same as the page size.  Probably a separate variable could   */
    /* be added to distinguish the allocation granularity from the  */
    /* actual page size, but in practice there is no good reason to */
    /* make allocations smaller than dwAllocationGranularity, so we */
    /* just use it instead of the actual page size here (as Cygwin  */
    /* itself does in many cases).                                  */
    GC_page_size = (size_t)GC_sysinfo.dwAllocationGranularity;
    GC_ASSERT(GC_page_size >= (size_t)GC_sysinfo.dwPageSize);
# else
    GC_page_size = (size_t)GC_sysinfo.dwPageSize;
# endif
# if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
    {
      OSVERSIONINFO verInfo;
      /* Check the current WinCE version. */
      verInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
      if (!GetVersionEx(&verInfo))
        ABORT("GetVersionEx failed");
      if (verInfo.dwPlatformId == VER_PLATFORM_WIN32_CE &&
          verInfo.dwMajorVersion < 6) {
        /* Only the first 32 MB of address space belongs to the     */
        /* current process (unless WinCE 6.0+ or emulation).        */
        GC_sysinfo.lpMaximumApplicationAddress = (LPVOID)((word)32 << 20);
#       ifdef THREADS
          /* On some old WinCE versions, it's observed that         */
          /* VirtualQuery calls don't work properly when used to    */
          /* get thread current stack committed minimum.            */
          if (verInfo.dwMajorVersion < 5)
            GC_dont_query_stack_min = TRUE;
#       endif
      }
    }
# endif
}
  663. # ifndef CYGWIN32
  664. # define is_writable(prot) ((prot) == PAGE_READWRITE \
  665. || (prot) == PAGE_WRITECOPY \
  666. || (prot) == PAGE_EXECUTE_READWRITE \
  667. || (prot) == PAGE_EXECUTE_WRITECOPY)
  668. /* Return the number of bytes that are writable starting at p. */
  669. /* The pointer p is assumed to be page aligned. */
  670. /* If base is not 0, *base becomes the beginning of the */
  671. /* allocation region containing p. */
  672. STATIC word GC_get_writable_length(ptr_t p, ptr_t *base)
  673. {
  674. MEMORY_BASIC_INFORMATION buf;
  675. word result;
  676. word protect;
  677. result = VirtualQuery(p, &buf, sizeof(buf));
  678. if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
  679. if (base != 0) *base = (ptr_t)(buf.AllocationBase);
  680. protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
  681. if (!is_writable(protect)) {
  682. return(0);
  683. }
  684. if (buf.State != MEM_COMMIT) return(0);
  685. return(buf.RegionSize);
  686. }
/* Compute the stack bottom for the calling thread (Windows/WinCE)  */
/* as the end of the writable region containing the current sp.     */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
{
  ptr_t trunc_sp;
  word size;

  /* Set page size if it is not ready (so client can use this */
  /* function even before GC is initialized).                 */
  if (!GC_page_size) GC_setpagesize();
  trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
  /* FIXME: This won't work if called from a deeply recursive */
  /* client code (and the committed stack space has grown).   */
  size = GC_get_writable_length(trunc_sp, 0);
  GC_ASSERT(size != 0);
  sb -> mem_base = trunc_sp + size; /* end of committed writable region */
  return GC_SUCCESS;
}
  702. # else /* CYGWIN32 */
  703. /* An alternate version for Cygwin (adapted from Dave Korn's */
  704. /* gcc version of boehm-gc). */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
{
# ifdef X86_64
    /* The Thread Environment Block stores the stack bottom directly. */
    sb -> mem_base = ((NT_TIB*)NtCurrentTeb())->StackBase;
# else
    void * _tlsbase;

    /* Read NT_TIB.StackBase (offset 4 in the TIB, addressed via the */
    /* fs segment register on 32-bit Windows).                       */
    __asm__ ("movl %%fs:4, %0"
             : "=r" (_tlsbase));
    sb -> mem_base = _tlsbase;
# endif
  return GC_SUCCESS;
}
  717. # endif /* CYGWIN32 */
  718. # define HAVE_GET_STACK_BASE
  719. #else /* !MSWIN32 */
/* Initialize GC_page_size (non-Windows variant). */
GC_INNER void GC_setpagesize(void)
{
# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
    GC_page_size = (size_t)GETPAGESIZE();
#   if !defined(CPPCHECK)
      if (0 == GC_page_size)
        ABORT("getpagesize failed");
#   endif
# else
    /* It's acceptable to fake it. */
    GC_page_size = HBLKSIZE;
# endif
}
  733. #endif /* !MSWIN32 */
  734. #ifdef HAIKU
  735. # include <kernel/OS.h>
  736. GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
  737. {
  738. thread_info th;
  739. get_thread_info(find_thread(NULL),&th);
  740. sb->mem_base = th.stack_end;
  741. return GC_SUCCESS;
  742. }
  743. # define HAVE_GET_STACK_BASE
  744. #endif /* HAIKU */
  745. #ifdef OS2
  746. GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
  747. {
  748. PTIB ptib; /* thread information block */
  749. PPIB ppib;
  750. if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
  751. WARN("DosGetInfoBlocks failed\n", 0);
  752. return GC_UNIMPLEMENTED;
  753. }
  754. sb->mem_base = ptib->tib_pstacklimit;
  755. return GC_SUCCESS;
  756. }
  757. # define HAVE_GET_STACK_BASE
  758. #endif /* OS2 */
  759. # ifdef AMIGA
  760. # define GC_AMIGA_SB
  761. # include "extra/AmigaOS.c"
  762. # undef GC_AMIGA_SB
  763. # define GET_MAIN_STACKBASE_SPECIAL
  764. # endif /* AMIGA */
  765. # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
/* Type of a handler as installed via signal() or sigaction(). */
typedef void (*GC_fault_handler_t)(int);

# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
     || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
     || defined(NETBSD)
    /* Previous handler state, saved by GC_set_and_save_fault_handler */
    /* and restored by GC_reset_fault_handler.                        */
    static struct sigaction old_segv_act;
#   if defined(_sigargs) /* !Irix6.x */ \
       || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
      static struct sigaction old_bus_act;
#   endif
# else
    static GC_fault_handler_t old_segv_handler;
#   ifdef HAVE_SIGBUS
      static GC_fault_handler_t old_bus_handler;
#   endif
# endif
/* Install h as the process-wide SIGSEGV (and, where applicable,    */
/* SIGBUS) handler, saving the previous handler(s) so that          */
/* GC_reset_fault_handler can restore them.                         */
GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h)
{
# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
     || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
     || defined(NETBSD)
    struct sigaction act;

    act.sa_handler = h;
#   ifdef SIGACTION_FLAGS_NODEFER_HACK
      /* Was necessary for Solaris 2.3 and very temporary */
      /* NetBSD bugs.                                     */
      act.sa_flags = SA_RESTART | SA_NODEFER;
#   else
      act.sa_flags = SA_RESTART;
#   endif

    (void) sigemptyset(&act.sa_mask);
    /* act.sa_restorer is deprecated and should not be initialized. */
#   ifdef GC_IRIX_THREADS
      /* Older versions have a bug related to retrieving and        */
      /* setting a handler at the same time.                        */
      (void) sigaction(SIGSEGV, 0, &old_segv_act);
      (void) sigaction(SIGSEGV, &act, 0);
#   else
      (void) sigaction(SIGSEGV, &act, &old_segv_act);
#     if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
         || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
        /* Under Irix 5.x or HP/UX, we may get SIGBUS.      */
        /* Pthreads doesn't exist under Irix 5.x, so we     */
        /* don't have to worry in the threads case.         */
        (void) sigaction(SIGBUS, &act, &old_bus_act);
#     endif
#   endif /* !GC_IRIX_THREADS */
# else
    old_segv_handler = signal(SIGSEGV, h);
#   ifdef HAVE_SIGBUS
      old_bus_handler = signal(SIGBUS, h);
#   endif
# endif
# if defined(CPPCHECK) && defined(ADDRESS_SANITIZER)
    /* Keep cppcheck from flagging __asan_default_options as unused. */
    GC_noop1((word)&__asan_default_options);
# endif
}
  822. # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
  823. # if defined(NEED_FIND_LIMIT) \
  824. || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
  825. /* Some tools to implement HEURISTIC2 */
  826. # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
  827. GC_INNER JMP_BUF GC_jmp_buf;
/* Temporary fault handler: abort the probe in progress by jumping  */
/* back to the SETJMP point in GC_find_limit_with_bound.            */
STATIC void GC_fault_handler(int sig GC_ATTR_UNUSED)
{
  LONGJMP(GC_jmp_buf, 1);
}
/* Install GC_fault_handler for the duration of a memory probe;     */
/* pair with GC_reset_fault_handler.                                */
GC_INNER void GC_setup_temporary_fault_handler(void)
{
  /* Handler is process-wide, so this should only happen in */
  /* one thread at a time.                                  */
  GC_ASSERT(I_HOLD_LOCK());
  GC_set_and_save_fault_handler(GC_fault_handler);
}
  839. GC_INNER void GC_reset_fault_handler(void)
  840. {
  841. # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
  842. || defined(HAIKU) || defined(HURD) || defined(FREEBSD) \
  843. || defined(NETBSD)
  844. (void) sigaction(SIGSEGV, &old_segv_act, 0);
  845. # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
  846. || defined(HURD) || defined(NETBSD)
  847. (void) sigaction(SIGBUS, &old_bus_act, 0);
  848. # endif
  849. # else
  850. (void) signal(SIGSEGV, old_segv_handler);
  851. # ifdef HAVE_SIGBUS
  852. (void) signal(SIGBUS, old_bus_handler);
  853. # endif
  854. # endif
  855. }
  856. /* Return the first non-addressable location > p (up) or */
  857. /* the smallest location q s.t. [q,p) is addressable (!up). */
  858. /* We assume that p (up) or p-1 (!up) is addressable. */
  859. /* Requires allocation lock. */
STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
{
  static volatile ptr_t result;
           /* Safer if static, since otherwise it may not be   */
           /* preserved across the longjmp.  Can safely be     */
           /* static since it's only called with the           */
           /* allocation lock held.                            */

  GC_ASSERT(up ? (word)bound >= MIN_PAGE_SIZE
              : (word)bound <= ~(word)MIN_PAGE_SIZE);
  GC_ASSERT(I_HOLD_LOCK());
  GC_setup_temporary_fault_handler();
  if (SETJMP(GC_jmp_buf) == 0) {
    /* Probe MIN_PAGE_SIZE at a time in the requested direction;    */
    /* the first faulting read longjmps out of the loop.            */
    result = (ptr_t)(((word)(p))
                      & ~(MIN_PAGE_SIZE-1));
    for (;;) {
      if (up) {
        if ((word)result >= (word)bound - MIN_PAGE_SIZE) {
          result = bound;
          break;
        }
        result += MIN_PAGE_SIZE; /* no overflow expected */
      } else {
        if ((word)result <= (word)bound + MIN_PAGE_SIZE) {
          result = bound - MIN_PAGE_SIZE;
                      /* This is to compensate        */
                      /* further result increment (we */
                      /* do not modify "up" variable  */
                      /* since it might be clobbered  */
                      /* by setjmp otherwise).        */
          break;
        }
        result -= MIN_PAGE_SIZE; /* no underflow expected */
      }
      GC_noop1((word)(*result));
    }
  }
  GC_reset_fault_handler();
  if (!up) {
    /* Undo the final decrement: return the smallest addressable    */
    /* location, not the faulting one.                              */
    result += MIN_PAGE_SIZE;
  }
  return(result);
}
  902. ptr_t GC_find_limit(ptr_t p, GC_bool up)
  903. {
  904. return GC_find_limit_with_bound(p, up, up ? (ptr_t)(word)(-1) : 0);
  905. }
  906. # endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
  907. #ifdef HPUX_STACKBOTTOM
  908. #include <sys/param.h>
  909. #include <sys/pstat.h>
/* HP/UX IA64: locate the base of the register stack engine (RSE)   */
/* backing store via pstat, falling back to a displacement from the */
/* main stack bottom.                                               */
GC_INNER ptr_t GC_get_register_stack_base(void)
{
  struct pst_vm_status vm_status;
  int i = 0;

  /* Enumerate the process VM regions looking for the RSE stack. */
  while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
    if (vm_status.pst_type == PS_RSESTACK) {
      return (ptr_t) vm_status.pst_vaddr;
    }
  }

  /* old way to get the register stackbottom */
  return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
                 & ~(BACKING_STORE_ALIGNMENT - 1));
}
  923. #endif /* HPUX_STACK_BOTTOM */
  924. #ifdef LINUX_STACKBOTTOM
  925. # include <sys/types.h>
  926. # include <sys/stat.h>
  927. # define STAT_SKIP 27 /* Number of fields preceding startstack */
  928. /* field in /proc/self/stat */
  929. # ifdef USE_LIBC_PRIVATES
  930. EXTERN_C_BEGIN
  931. # pragma weak __libc_stack_end
  932. extern ptr_t __libc_stack_end;
  933. # ifdef IA64
  934. # pragma weak __libc_ia64_register_backing_store_base
  935. extern ptr_t __libc_ia64_register_backing_store_base;
  936. # endif
  937. EXTERN_C_END
  938. # endif
  939. # ifdef IA64
/* Linux IA64: determine the base of the register backing store,    */
/* trying the glibc-private variable, then /proc, then probing.     */
GC_INNER ptr_t GC_get_register_stack_base(void)
{
  ptr_t result;

# ifdef USE_LIBC_PRIVATES
    if (0 != &__libc_ia64_register_backing_store_base
        && 0 != __libc_ia64_register_backing_store_base) {
      /* Glibc 2.2.4 has a bug such that for dynamically linked */
      /* executables __libc_ia64_register_backing_store_base is */
      /* defined but uninitialized during constructor calls.    */
      /* Hence we check for both nonzero address and value.     */
      return __libc_ia64_register_backing_store_base;
    }
# endif
  result = backing_store_base_from_proc();
  if (0 == result) {
    result = GC_find_limit(GC_save_regs_in_stack(), FALSE);
    /* Now seems to work better than constant displacement  */
    /* heuristic used in 6.X versions.  The latter seems to */
    /* fail for 2.6 kernels.                                */
  }
  return result;
}
  962. # endif /* IA64 */
  963. STATIC ptr_t GC_linux_main_stack_base(void)
  964. {
  965. /* We read the stack base value from /proc/self/stat. We do this */
  966. /* using direct I/O system calls in order to avoid calling malloc */
  967. /* in case REDIRECT_MALLOC is defined. */
  968. # ifndef STAT_READ
  969. /* Also defined in pthread_support.c. */
  970. # define STAT_BUF_SIZE 4096
  971. # define STAT_READ read
  972. # endif
  973. /* Should probably call the real read, if read is wrapped. */
  974. char stat_buf[STAT_BUF_SIZE];
  975. int f;
  976. word result;
  977. int i, buf_offset = 0, len;
  978. /* First try the easy way. This should work for glibc 2.2 */
  979. /* This fails in a prelinked ("prelink" command) executable */
  980. /* since the correct value of __libc_stack_end never */
  981. /* becomes visible to us. The second test works around */
  982. /* this. */
  983. # ifdef USE_LIBC_PRIVATES
  984. if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
  985. # if defined(IA64)
  986. /* Some versions of glibc set the address 16 bytes too */
  987. /* low while the initialization code is running. */
  988. if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
  989. return __libc_stack_end + 0x10;
  990. } /* Otherwise it's not safe to add 16 bytes and we fall */
  991. /* back to using /proc. */
  992. # elif defined(SPARC)
  993. /* Older versions of glibc for 64-bit SPARC do not set this */
  994. /* variable correctly, it gets set to either zero or one. */
  995. if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
  996. return __libc_stack_end;
  997. # else
  998. return __libc_stack_end;
  999. # endif
  1000. }
  1001. # endif
  1002. f = open("/proc/self/stat", O_RDONLY);
  1003. if (f < 0)
  1004. ABORT("Couldn't read /proc/self/stat");
  1005. len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
  1006. close(f);
  1007. /* Skip the required number of fields. This number is hopefully */
  1008. /* constant across all Linux implementations. */
  1009. for (i = 0; i < STAT_SKIP; ++i) {
  1010. while (buf_offset < len && isspace(stat_buf[buf_offset++])) {
  1011. /* empty */
  1012. }
  1013. while (buf_offset < len && !isspace(stat_buf[buf_offset++])) {
  1014. /* empty */
  1015. }
  1016. }
  1017. /* Skip spaces. */
  1018. while (buf_offset < len && isspace(stat_buf[buf_offset])) {
  1019. buf_offset++;
  1020. }
  1021. /* Find the end of the number and cut the buffer there. */
  1022. for (i = 0; buf_offset + i < len; i++) {
  1023. if (!isdigit(stat_buf[buf_offset + i])) break;
  1024. }
  1025. if (buf_offset + i >= len) ABORT("Could not parse /proc/self/stat");
  1026. stat_buf[buf_offset + i] = '\0';
  1027. result = (word)STRTOULL(&stat_buf[buf_offset], NULL, 10);
  1028. if (result < 0x100000 || (result & (sizeof(word) - 1)) != 0)
  1029. ABORT("Absurd stack bottom value");
  1030. return (ptr_t)result;
  1031. }
  1032. #endif /* LINUX_STACKBOTTOM */
  1033. #ifdef QNX_STACKBOTTOM
  1034. STATIC ptr_t GC_qnx_main_stack_base(void)
  1035. {
  1036. return (ptr_t)__builtin_frame_address(0);
  1037. }
  1038. #endif /* QNX_STACKBOTTOM */
  1039. #ifdef FREEBSD_STACKBOTTOM
  1040. /* This uses an undocumented sysctl call, but at least one expert */
  1041. /* believes it will stay. */
  1042. # include <unistd.h>
  1043. # include <sys/types.h>
  1044. # include <sys/sysctl.h>
  1045. STATIC ptr_t GC_freebsd_main_stack_base(void)
  1046. {
  1047. int nm[2] = {CTL_KERN, KERN_USRSTACK};
  1048. ptr_t base;
  1049. size_t len = sizeof(ptr_t);
  1050. int r = sysctl(nm, 2, &base, &len, NULL, 0);
  1051. if (r) ABORT("Error getting main stack base");
  1052. return base;
  1053. }
  1054. #endif /* FREEBSD_STACKBOTTOM */
  1055. #if defined(ECOS) || defined(NOSYS)
ptr_t GC_get_main_stack_base(void)
{
  /* On these platforms the stack bottom is a known constant. */
  return STACKBOTTOM;
}
  1060. # define GET_MAIN_STACKBASE_SPECIAL
  1061. #elif defined(SYMBIAN)
  1062. EXTERN_C_BEGIN
  1063. extern int GC_get_main_symbian_stack_base(void);
  1064. EXTERN_C_END
ptr_t GC_get_main_stack_base(void)
{
  /* Delegate to the Symbian-specific helper declared above. */
  return (ptr_t)GC_get_main_symbian_stack_base();
}
  1069. # define GET_MAIN_STACKBASE_SPECIAL
  1070. #elif !defined(AMIGA) && !defined(HAIKU) && !defined(OS2) \
  1071. && !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) \
  1072. && !defined(GC_OPENBSD_THREADS) \
  1073. && (!defined(GC_SOLARIS_THREADS) || defined(_STRICT_STDC))
  1074. # if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
  1075. && (defined(THREADS) || defined(USE_GET_STACKBASE_FOR_MAIN))
  1076. # include <pthread.h>
  1077. # ifdef HAVE_PTHREAD_NP_H
  1078. # include <pthread_np.h> /* for pthread_attr_get_np() */
  1079. # endif
  1080. # elif defined(DARWIN) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
  1081. /* We could use pthread_get_stackaddr_np even in case of a */
  1082. /* single-threaded gclib (there is no -lpthread on Darwin). */
  1083. # include <pthread.h>
  1084. # undef STACKBOTTOM
  1085. # define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self())
  1086. # endif
/* Generic Unix variant: determine the main thread's stack bottom,  */
/* trying the threads library first, then platform-specific         */
/* mechanisms and heuristics.                                       */
ptr_t GC_get_main_stack_base(void)
{
  ptr_t result;

# if (defined(HAVE_PTHREAD_ATTR_GET_NP) \
      || defined(HAVE_PTHREAD_GETATTR_NP)) \
     && (defined(USE_GET_STACKBASE_FOR_MAIN) \
         || (defined(THREADS) && !defined(REDIRECT_MALLOC)))
    pthread_attr_t attr;
    void *stackaddr;
    size_t size;

    /* First choice: ask the threads library for this thread's */
    /* stack region.                                           */
#   ifdef HAVE_PTHREAD_ATTR_GET_NP
      if (pthread_attr_init(&attr) == 0
          && (pthread_attr_get_np(pthread_self(), &attr) == 0
              ? TRUE : (pthread_attr_destroy(&attr), FALSE)))
#   else /* HAVE_PTHREAD_GETATTR_NP */
      if (pthread_getattr_np(pthread_self(), &attr) == 0)
#   endif
      {
        if (pthread_attr_getstack(&attr, &stackaddr, &size) == 0
            && stackaddr != NULL) {
          (void)pthread_attr_destroy(&attr);
#         ifdef STACK_GROWS_DOWN
            /* getstack returns the lowest address; convert it to  */
            /* the stack bottom.                                   */
            stackaddr = (char *)stackaddr + size;
#         endif
          return (ptr_t)stackaddr;
        }
        (void)pthread_attr_destroy(&attr);
      }
    WARN("pthread_getattr_np or pthread_attr_getstack failed"
         " for main thread\n", 0);
# endif
# ifdef STACKBOTTOM
    result = STACKBOTTOM;
# else
#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
#   ifdef HEURISTIC1
      /* Round the current sp to a STACK_GRAN boundary in the        */
      /* direction of stack growth.                                  */
#     ifdef STACK_GROWS_DOWN
        result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
                         & ~STACKBOTTOM_ALIGNMENT_M1);
#     else
        result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
#     endif
#   elif defined(LINUX_STACKBOTTOM)
      result = GC_linux_main_stack_base();
#   elif defined(QNX_STACKBOTTOM)
      result = GC_qnx_main_stack_base();
#   elif defined(FREEBSD_STACKBOTTOM)
      result = GC_freebsd_main_stack_base();
#   elif defined(HEURISTIC2)
      /* Probe for the end of the stack region, clamped to the       */
      /* optional HEURISTIC2_LIMIT.                                   */
      {
        ptr_t sp = GC_approx_sp();

#       ifdef STACK_GROWS_DOWN
          result = GC_find_limit(sp, TRUE);
#         if defined(HEURISTIC2_LIMIT) && !defined(CPPCHECK)
            if ((word)result > (word)HEURISTIC2_LIMIT
                && (word)sp < (word)HEURISTIC2_LIMIT) {
              result = HEURISTIC2_LIMIT;
            }
#         endif
#       else
          result = GC_find_limit(sp, FALSE);
#         if defined(HEURISTIC2_LIMIT) && !defined(CPPCHECK)
            if ((word)result < (word)HEURISTIC2_LIMIT
                && (word)sp > (word)HEURISTIC2_LIMIT) {
              result = HEURISTIC2_LIMIT;
            }
#         endif
#       endif
      }
#   elif defined(STACK_NOT_SCANNED) || defined(CPPCHECK)
      result = NULL;
#   else
#     error None of HEURISTIC* and *STACKBOTTOM defined!
#   endif
#   if defined(STACK_GROWS_DOWN) && !defined(CPPCHECK)
      if (result == 0)
        result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#   endif
# endif
  GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)result);
  return(result);
}
  1169. # define GET_MAIN_STACKBASE_SPECIAL
  1170. #endif /* !AMIGA, !HAIKU, !OPENBSD, !OS2, !Windows */
  1171. #if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
  1172. && defined(THREADS) && !defined(HAVE_GET_STACK_BASE)
  1173. # include <pthread.h>
  1174. # ifdef HAVE_PTHREAD_NP_H
  1175. # include <pthread_np.h>
  1176. # endif
/* Determine the stack base (and, on IA64, the register backing    */
/* store base) for the calling thread via the pthread attributes.  */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
  pthread_attr_t attr;
  size_t size;
# ifdef IA64
    DCL_LOCK_STATE;
# endif

# ifdef HAVE_PTHREAD_ATTR_GET_NP
    if (pthread_attr_init(&attr) != 0)
      ABORT("pthread_attr_init failed");
    if (pthread_attr_get_np(pthread_self(), &attr) != 0) {
      WARN("pthread_attr_get_np failed\n", 0);
      (void)pthread_attr_destroy(&attr);
      return GC_UNIMPLEMENTED;
    }
# else /* HAVE_PTHREAD_GETATTR_NP */
    if (pthread_getattr_np(pthread_self(), &attr) != 0) {
      WARN("pthread_getattr_np failed\n", 0);
      return GC_UNIMPLEMENTED;
    }
# endif
  if (pthread_attr_getstack(&attr, &(b -> mem_base), &size) != 0) {
    ABORT("pthread_attr_getstack failed");
  }
  (void)pthread_attr_destroy(&attr);
# ifdef STACK_GROWS_DOWN
    /* getstack reports the lowest address; convert to stack bottom. */
    b -> mem_base = (char *)(b -> mem_base) + size;
# endif

# ifdef IA64
    /* We could try backing_store_base_from_proc, but that's safe   */
    /* only if no mappings are being asynchronously created.        */
    /* Subtracting the size from the stack base doesn't work for at */
    /* least the main thread.                                       */
    LOCK();
    {
      IF_CANCEL(int cancel_state;)
      ptr_t bsp;
      ptr_t next_stack;

      DISABLE_CANCEL(cancel_state);
      bsp = GC_save_regs_in_stack();
      next_stack = GC_greatest_stack_base_below(bsp);
      if (0 == next_stack) {
        b -> reg_base = GC_find_limit(bsp, FALSE);
      } else {
        /* Avoid walking backwards into preceding memory stack and */
        /* growing it.                                             */
        b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
      }
      RESTORE_CANCEL(cancel_state);
    }
    UNLOCK();
# endif
  return GC_SUCCESS;
}
  1231. # define HAVE_GET_STACK_BASE
  1232. #endif /* THREADS && (HAVE_PTHREAD_ATTR_GET_NP || HAVE_PTHREAD_GETATTR_NP) */
  1233. #if defined(GC_DARWIN_THREADS) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
  1234. # include <pthread.h>
  1235. GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
  1236. {
  1237. /* pthread_get_stackaddr_np() should return stack bottom (highest */
  1238. /* stack address plus 1). */
  1239. b->mem_base = pthread_get_stackaddr_np(pthread_self());
  1240. GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)b->mem_base);
  1241. return GC_SUCCESS;
  1242. }
  1243. # define HAVE_GET_STACK_BASE
  1244. #endif /* GC_DARWIN_THREADS */
  1245. #ifdef GC_OPENBSD_THREADS
  1246. # include <sys/signal.h>
  1247. # include <pthread.h>
  1248. # include <pthread_np.h>
  1249. /* Find the stack using pthread_stackseg_np(). */
  1250. GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
  1251. {
  1252. stack_t stack;
  1253. if (pthread_stackseg_np(pthread_self(), &stack))
  1254. ABORT("pthread_stackseg_np(self) failed");
  1255. sb->mem_base = stack.ss_sp;
  1256. return GC_SUCCESS;
  1257. }
  1258. # define HAVE_GET_STACK_BASE
  1259. #endif /* GC_OPENBSD_THREADS */
  1260. #if defined(GC_SOLARIS_THREADS) && !defined(_STRICT_STDC)
  1261. # include <thread.h>
  1262. # include <signal.h>
  1263. # include <pthread.h>
  1264. /* These variables are used to cache ss_sp value for the primordial */
  1265. /* thread (it's better not to call thr_stksegment() twice for this */
  1266. /* thread - see JDK bug #4352906). */
static pthread_t stackbase_main_self = 0;
                        /* 0 means stackbase_main_ss_sp value is unset. */
static void *stackbase_main_ss_sp = NULL;

/* Solaris variant: obtain the stack bottom via thr_stksegment(),     */
/* caching the result for the primordial thread so that               */
/* thr_stksegment() is not called twice for it (see JDK bug #4352906).*/
/* Always returns GC_SUCCESS; aborts on failure.                      */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
    stack_t s;
    pthread_t self = pthread_self();

    if (self == stackbase_main_self)
      {
        /* If the client calls GC_get_stack_base() from the main thread */
        /* then just return the cached value.                           */
        b -> mem_base = stackbase_main_ss_sp;
        GC_ASSERT(b -> mem_base != NULL);
        return GC_SUCCESS;
      }

    if (thr_stksegment(&s)) {
      /* According to the manual, the only failure error code returned  */
      /* is EAGAIN meaning "the information is not available due to the */
      /* thread is not yet completely initialized or it is an internal  */
      /* thread" - this shouldn't happen here.                          */
      ABORT("thr_stksegment failed");
    }
    /* s.ss_sp holds the pointer to the stack bottom. */
    GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)s.ss_sp);

    if (!stackbase_main_self && thr_main() != 0)
      {
        /* Cache the stack base value for the primordial thread (this   */
        /* is done during GC_init, so there is no race).                */
        stackbase_main_ss_sp = s.ss_sp;
        stackbase_main_self = self;
      }

    b -> mem_base = s.ss_sp;
    return GC_SUCCESS;
}
  1301. # define HAVE_GET_STACK_BASE
  1302. #endif /* GC_SOLARIS_THREADS */
  1303. #ifdef GC_RTEMS_PTHREADS
  1304. GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
  1305. {
  1306. sb->mem_base = rtems_get_stack_bottom();
  1307. return GC_SUCCESS;
  1308. }
  1309. # define HAVE_GET_STACK_BASE
  1310. #endif /* GC_RTEMS_PTHREADS */
  1311. #ifndef HAVE_GET_STACK_BASE
  1312. # ifdef NEED_FIND_LIMIT
  1313. /* Retrieve stack base. */
  1314. /* Using the GC_find_limit version is risky. */
  1315. /* On IA64, for example, there is no guard page between the */
  1316. /* stack of one thread and the register backing store of the */
  1317. /* next. Thus this is likely to identify way too large a */
  1318. /* "stack" and thus at least result in disastrous performance. */
  1319. /* FIXME - Implement better strategies here. */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

    LOCK();
    /* GC_find_limit() probes memory and relies on a temporary fault  */
    /* handler; keep cancellation disabled while it is installed.     */
    DISABLE_CANCEL(cancel_state);  /* May be unnecessary? */
#   ifdef STACK_GROWS_DOWN
      b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
#     ifdef IA64
        /* Also locate the end of the register backing store.         */
        b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
#     endif
#   else
      b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
#   endif
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    return GC_SUCCESS;
}
  1338. # else
GC_API int GC_CALL GC_get_stack_base(
                                struct GC_stack_base *b GC_ATTR_UNUSED)
{
    /* Without NEED_FIND_LIMIT there is no generic way to probe for   */
    /* the stack bottom; reuse the main-stack value only when it is   */
    /* valid for the (sole) thread.                                   */
#   if defined(GET_MAIN_STACKBASE_SPECIAL) && !defined(THREADS) \
       && !defined(IA64)
      b->mem_base = GC_get_main_stack_base();
      return GC_SUCCESS;
#   else
      return GC_UNIMPLEMENTED;
#   endif
}
  1350. # endif /* !NEED_FIND_LIMIT */
  1351. #endif /* !HAVE_GET_STACK_BASE */
  1352. #ifndef GET_MAIN_STACKBASE_SPECIAL
  1353. /* This is always called from the main thread. Default implementation. */
/* Default implementation of GC_get_main_stack_base(), used when no   */
/* platform-specific version was selected above.  Always called from  */
/* the main thread; aborts if the stack base cannot be determined.    */
ptr_t GC_get_main_stack_base(void)
{
    struct GC_stack_base sb;

    if (GC_get_stack_base(&sb) != GC_SUCCESS)
      ABORT("GC_get_stack_base failed");
    GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)sb.mem_base);
    return (ptr_t)sb.mem_base;
}
  1362. #endif /* !GET_MAIN_STACKBASE_SPECIAL */
  1363. /* Register static data segment(s) as roots. If more data segments are */
  1364. /* added later then they need to be registered at that point (as we do */
  1365. /* with SunOS dynamic loading), or GC_mark_roots needs to check for */
  1366. /* them (as we do with PCR). Called with allocator lock held. */
  1367. # ifdef OS2
/* OS/2: enumerate the writable segments of the running executable by */
/* re-reading its LX header from disk, and register each of them as a */
/* GC root.  Aborts on any I/O or header-format error.                */
void GC_register_data_segments(void)
{
    PTIB ptib;
    PPIB ppib;
    HMODULE module_handle;
#   define PBUFSIZ 512
    UCHAR path[PBUFSIZ];
    FILE * myexefile;
    struct exe_hdr hdrdos;      /* MSDOS header.                */
    struct e32_exe hdr386;      /* Real header for my executable */
    struct o32_obj seg;         /* Current segment              */
    int nsegs;

#   if defined(CPPCHECK)
      hdrdos.padding[0] = 0; /* to prevent "field unused" warnings */
      hdr386.exe_format_level = 0;
      hdr386.os = 0;
      hdr386.padding1[0] = 0;
      hdr386.padding2[0] = 0;
      seg.pagemap = 0;
      seg.mapsize = 0;
      seg.reserved = 0;
#   endif
    /* Locate the executable via the process info block.              */
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        ABORT("DosGetInfoBlocks failed");
    }
    module_handle = ppib -> pib_hmte;
    if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
        ABORT("DosQueryModuleName failed");
    }
    myexefile = fopen(path, "rb");
    if (myexefile == 0) {
        ABORT_ARG1("Failed to open executable", ": %s", path);
    }
    if (fread((char *)(&hdrdos), 1, sizeof(hdrdos), myexefile)
          < sizeof(hdrdos)) {
        ABORT_ARG1("Could not read MSDOS header", " from: %s", path);
    }
    if (E_MAGIC(hdrdos) != EMAGIC) {
        ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
    }
    if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
        /* NOTE(review): message appears copy-pasted from the magic-  */
        /* number check above; a seek failure is not a bad magic.     */
        ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
    }
    if (fread((char *)(&hdr386), 1, sizeof(hdr386), myexefile)
          < sizeof(hdr386)) {
        ABORT_ARG1("Could not read OS/2 header", " from: %s", path);
    }
    if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
        ABORT_ARG1("Bad OS/2 magic number", " in file: %s", path);
    }
    if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
        ABORT_ARG1("Bad byte order in executable", " file: %s", path);
    }
    if (E32_CPU(hdr386) == E32CPU286) {
        ABORT_ARG1("GC cannot handle 80286 executables", ": %s", path);
    }
    if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
              SEEK_SET) != 0) {
        ABORT_ARG1("Seek to object table failed", " in file: %s", path);
    }
    for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
      int flags;

      if (fread((char *)(&seg), 1, sizeof(seg), myexefile) < sizeof(seg)) {
        ABORT_ARG1("Could not read obj table entry", " from file: %s", path);
      }
      flags = O32_FLAGS(seg);
      /* Only readable, writable, valid segments can contain roots.   */
      if (!(flags & OBJWRITE)) continue;
      if (!(flags & OBJREAD)) continue;
      if (flags & OBJINVALID) {
          GC_err_printf("Object with invalid pages?\n");
          continue;
      }
      GC_add_roots_inner((ptr_t)O32_BASE(seg),
                         (ptr_t)(O32_BASE(seg)+O32_SIZE(seg)), FALSE);
    }
    (void)fclose(myexefile);
}
  1445. # else /* !OS2 */
  1446. # if defined(GWW_VDB)
  1447. # ifndef MEM_WRITE_WATCH
  1448. # define MEM_WRITE_WATCH 0x200000
  1449. # endif
  1450. # ifndef WRITE_WATCH_FLAG_RESET
  1451. # define WRITE_WATCH_FLAG_RESET 1
  1452. # endif
  1453. /* Since we can't easily check whether ULONG_PTR and SIZE_T are */
  1454. /* defined in Win32 basetsd.h, we define own ULONG_PTR. */
  1455. # define GC_ULONG_PTR word
  1456. typedef UINT (WINAPI * GetWriteWatch_type)(
  1457. DWORD, PVOID, GC_ULONG_PTR /* SIZE_T */,
  1458. PVOID *, GC_ULONG_PTR *, PULONG);
  1459. static GetWriteWatch_type GetWriteWatch_func;
  1460. static DWORD GetWriteWatch_alloc_flag;
  1461. # define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL)
  /* Probe for a usable GetWriteWatch() in kernel32.dll and set        */
  /* GetWriteWatch_func / GetWriteWatch_alloc_flag accordingly.        */
  /* Idempotent (guarded by a static flag).  May be told to skip the   */
  /* probe via the GC_USE_GETWRITEWATCH environment variable when      */
  /* MPROTECT_VDB is available as the fallback strategy.               */
  static void detect_GetWriteWatch(void)
  {
    static GC_bool done;
    HMODULE hK32;

    if (done)
      return;

#   if defined(MPROTECT_VDB)
      {
        char * str = GETENV("GC_USE_GETWRITEWATCH");
#       if defined(GC_PREFER_MPROTECT_VDB)
          if (str == NULL || (*str == '0' && *(str + 1) == '\0')) {
            /* GC_USE_GETWRITEWATCH is unset or set to "0".           */
            done = TRUE; /* falling back to MPROTECT_VDB strategy.    */
            /* This should work as if GWW_VDB is undefined. */
            return;
          }
#       else
          if (str != NULL && *str == '0' && *(str + 1) == '\0') {
            /* GC_USE_GETWRITEWATCH is set "0".                       */
            done = TRUE; /* falling back to MPROTECT_VDB strategy.    */
            return;
          }
#       endif
      }
#   endif

#   ifdef MSWINRT_FLAVOR
      /* WinRT forbids GetModuleHandle; recover kernel32's base by    */
      /* querying the region containing one of its own entry points.  */
      {
        MEMORY_BASIC_INFORMATION memInfo;
        SIZE_T result = VirtualQuery(GetProcAddress,
                                     &memInfo, sizeof(memInfo));
        if (result != sizeof(memInfo))
          ABORT("Weird VirtualQuery result");
        hK32 = (HMODULE)memInfo.AllocationBase;
      }
#   else
      hK32 = GetModuleHandle(TEXT("kernel32.dll"));
#   endif
    if (hK32 != (HMODULE)0 &&
        (GetWriteWatch_func = (GetWriteWatch_type)GetProcAddress(hK32,
                                                "GetWriteWatch")) != NULL) {
      /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH,   */
      /* as some versions of kernel32.dll have one but not the      */
      /* other, making the feature completely broken.               */
      void * page = VirtualAlloc(NULL, GC_page_size,
                                 MEM_WRITE_WATCH | MEM_RESERVE,
                                 PAGE_READWRITE);
      if (page != NULL) {
        PVOID pages[16];
        GC_ULONG_PTR count = 16;
        DWORD page_size;
        /* Check that it actually works.  In spite of some            */
        /* documentation it actually seems to exist on W2K.           */
        /* This test may be unnecessary, but ...                      */
        if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
                               page, GC_page_size,
                               pages,
                               &count,
                               &page_size) != 0) {
          /* GetWriteWatch always fails. */
          GetWriteWatch_func = NULL;
        } else {
          GetWriteWatch_alloc_flag = MEM_WRITE_WATCH;
        }
        VirtualFree(page, 0 /* dwSize */, MEM_RELEASE);
      } else {
        /* GetWriteWatch will be useless. */
        GetWriteWatch_func = NULL;
      }
    }
#   ifndef SMALL_CONFIG
      if (GetWriteWatch_func == NULL) {
        GC_COND_LOG_PRINTF("Did not find a usable GetWriteWatch()\n");
      } else {
        GC_COND_LOG_PRINTF("Using GetWriteWatch()\n");
      }
#   endif
    done = TRUE;
  }
  1540. # else
  1541. # define GetWriteWatch_alloc_flag 0
  1542. # endif /* !GWW_VDB */
  1543. # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
  1544. # ifdef MSWIN32
  1545. /* Unfortunately, we have to handle win32s very differently from NT, */
  1546. /* Since VirtualQuery has very different semantics. In particular, */
  1547. /* under win32s a VirtualQuery call on an unmapped page returns an */
  1548. /* invalid result. Under NT, GC_register_data_segments is a no-op */
  1549. /* and all real work is done by GC_register_dynamic_libraries. Under */
  1550. /* win32s, we cannot find the data segments associated with dll's. */
  1551. /* We register the main data segment here. */
  1552. GC_INNER GC_bool GC_no_win32_dlls = FALSE;
  1553. /* This used to be set for gcc, to avoid dealing with */
  1554. /* the structured exception handling issues. But we now have */
  1555. /* assembly code to do that right. */
  1556. GC_INNER GC_bool GC_wnt = FALSE;
  1557. /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */
  /* One-time Win32 initialization: determine whether we run on an    */
  /* NT-derived Windows (GC_wnt) or under win32s (GC_no_win32_dlls),  */
  /* and disable heap unmapping in the latter case.                   */
  GC_INNER void GC_init_win32(void)
  {
#   if defined(_WIN64) || (defined(_MSC_VER) && _MSC_VER >= 1800)
      /* MS Visual Studio 2013 deprecates GetVersion, but on the other */
      /* hand it cannot be used to target pre-Win2K.                   */
      GC_wnt = TRUE;
#   else
      /* Set GC_wnt.  If we're running under win32s, assume that no    */
      /* DLLs will be loaded.  I doubt anyone still runs win32s, but...*/
      DWORD v = GetVersion();

      /* High bit clear means an NT-derived system; for non-NT, a      */
      /* major version <= 3 indicates win32s.                          */
      GC_wnt = !(v & 0x80000000);
      GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
#   endif
#   ifdef USE_MUNMAP
      if (GC_no_win32_dlls) {
        /* Turn off unmapping for safety (since may not work well with */
        /* GlobalAlloc).                                               */
        GC_unmap_threshold = 0;
      }
#   endif
  }
  1579. /* Return the smallest address a such that VirtualQuery */
  1580. /* returns correct results for all addresses between a and start. */
  1581. /* Assumes VirtualQuery returns correct information for start. */
  /* Return the smallest address a such that VirtualQuery returns     */
  /* correct results for all addresses between a and start, by        */
  /* walking backwards through allocation bases.  Assumes             */
  /* VirtualQuery returns correct information for start itself.       */
  STATIC ptr_t GC_least_described_address(ptr_t start)
  {
    MEMORY_BASIC_INFORMATION buf;
    LPVOID limit;
    ptr_t p;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    /* Start from the page containing "start".                        */
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
      size_t result;
      LPVOID q = (LPVOID)(p - GC_page_size);

      /* Stop on pointer underflow or below the lowest valid address. */
      if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
      result = VirtualQuery(q, &buf, sizeof(buf));
      if (result != sizeof(buf) || buf.AllocationBase == 0) break;
      /* Jump to the base of the allocation containing q and retry.   */
      p = (ptr_t)(buf.AllocationBase);
    }
    return p;
  }
  1599. # endif /* MSWIN32 */
  1600. # ifndef REDIRECT_MALLOC
  1601. /* We maintain a linked list of AllocationBase values that we know */
  1602. /* correspond to malloc heap sections. Currently this is only called */
  1603. /* during a GC. But there is some hope that for long running */
  1604. /* programs we will eventually see most heap sections. */
  1605. /* In the long run, it would be more reliable to occasionally walk */
  1606. /* the malloc heap with HeapWalk on the default heap. But that */
  1607. /* apparently works only for NT-based Windows. */
  1608. STATIC size_t GC_max_root_size = 100000; /* Appr. largest root size. */
  1609. # ifdef USE_WINALLOC
  1610. /* In the long run, a better data structure would also be nice ... */
  1611. STATIC struct GC_malloc_heap_list {
  1612. void * allocation_base;
  1613. struct GC_malloc_heap_list *next;
  1614. } *GC_malloc_heap_l = 0;
  1615. /* Is p the base of one of the malloc heap sections we already know */
  1616. /* about? */
  1617. STATIC GC_bool GC_is_malloc_heap_base(void *p)
  1618. {
  1619. struct GC_malloc_heap_list *q = GC_malloc_heap_l;
  1620. while (0 != q) {
  1621. if (q -> allocation_base == p) return TRUE;
  1622. q = q -> next;
  1623. }
  1624. return FALSE;
  1625. }
  1626. STATIC void *GC_get_allocation_base(void *p)
  1627. {
  1628. MEMORY_BASIC_INFORMATION buf;
  1629. size_t result = VirtualQuery(p, &buf, sizeof(buf));
  1630. if (result != sizeof(buf)) {
  1631. ABORT("Weird VirtualQuery result");
  1632. }
  1633. return buf.AllocationBase;
  1634. }
  1635. GC_INNER void GC_add_current_malloc_heap(void)
  1636. {
  1637. struct GC_malloc_heap_list *new_l =
  1638. malloc(sizeof(struct GC_malloc_heap_list));
  1639. void * candidate = GC_get_allocation_base(new_l);
  1640. if (new_l == 0) return;
  1641. if (GC_is_malloc_heap_base(candidate)) {
  1642. /* Try a little harder to find malloc heap. */
  1643. size_t req_size = 10000;
  1644. do {
  1645. void *p = malloc(req_size);
  1646. if (0 == p) {
  1647. free(new_l);
  1648. return;
  1649. }
  1650. candidate = GC_get_allocation_base(p);
  1651. free(p);
  1652. req_size *= 2;
  1653. } while (GC_is_malloc_heap_base(candidate)
  1654. && req_size < GC_max_root_size/10 && req_size < 500000);
  1655. if (GC_is_malloc_heap_base(candidate)) {
  1656. free(new_l);
  1657. return;
  1658. }
  1659. }
  1660. GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n",
  1661. candidate);
  1662. new_l -> allocation_base = candidate;
  1663. new_l -> next = GC_malloc_heap_l;
  1664. GC_malloc_heap_l = new_l;
  1665. }
  1666. # endif /* USE_WINALLOC */
  1667. # endif /* !REDIRECT_MALLOC */
  1668. STATIC word GC_n_heap_bases = 0; /* See GC_heap_bases. */
  1669. /* Is p the start of either the malloc heap, or of one of our */
  1670. /* heap sections? */
  GC_INNER GC_bool GC_is_heap_base(void *p)
  {
    unsigned i;

#   ifndef REDIRECT_MALLOC
      /* Keep GC_max_root_size up to date; it bounds the probing loop */
      /* in GC_add_current_malloc_heap().                             */
      if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
#     ifdef USE_WINALLOC
        if (GC_is_malloc_heap_base(p)) return TRUE;
#     endif
#   endif
    /* Check the bases of our own heap sections.                      */
    for (i = 0; i < GC_n_heap_bases; i++) {
      if (GC_heap_bases[i] == p) return TRUE;
    }
    return FALSE;
  }
  1685. #ifdef MSWIN32
  /* Scan the address space around static_root with VirtualQuery,     */
  /* coalescing adjacent committed writable regions and registering   */
  /* each maximal run as a GC root.  Only needed on win32s (where     */
  /* DLL data segments cannot be enumerated); a no-op otherwise.      */
  STATIC void GC_register_root_section(ptr_t static_root)
  {
    MEMORY_BASIC_INFORMATION buf;
    LPVOID p;
    char * base;
    char * limit;

    if (!GC_no_win32_dlls) return;
    p = base = limit = GC_least_described_address(static_root);
    while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
      size_t result = VirtualQuery(p, &buf, sizeof(buf));
      char * new_limit;
      DWORD protect;

      /* Stop at unqueryable memory, free regions, or our own heap.   */
      if (result != sizeof(buf) || buf.AllocationBase == 0
          || GC_is_heap_base(buf.AllocationBase)) break;
      new_limit = (char *)p + buf.RegionSize;
      protect = buf.Protect;
      if (buf.State == MEM_COMMIT
          && is_writable(protect)) {
        if ((char *)p == limit) {
          /* Contiguous with the current run: extend it.              */
          limit = new_limit;
        } else {
          /* Gap found: flush the previous run, start a new one.      */
          if (base != limit) GC_add_roots_inner(base, limit, FALSE);
          base = (char *)p;
          limit = new_limit;
        }
      }
      if ((word)p > (word)new_limit /* overflow */) break;
      p = (LPVOID)new_limit;
    }
    /* Flush the final pending run, if any.                           */
    if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }
  1717. #endif /* MSWIN32 */
  /* Windows: register static data roots.  On NT-class systems this   */
  /* is a no-op (the dynamic-library walker does the real work); on   */
  /* win32s we scan around a known GC global variable.                */
  void GC_register_data_segments(void)
  {
#   ifdef MSWIN32
      GC_register_root_section((ptr_t)&GC_pages_executable);
                            /* any other GC global variable would fit too. */
#   endif
  }
  1725. # else /* !OS2 && !Windows */
  1726. # if (defined(SVR4) || defined(AIX) || defined(DGUX) \
  1727. || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
  /* SVR4-style heuristic for the start of the writable data segment: */
  /* round etext up to the next max_page_size boundary (keeping the   */
  /* in-page offset) and verify the address is writable under a       */
  /* temporary fault handler; on a fault, fall back to probing        */
  /* backwards from DATAEND via GC_find_limit().                      */
  ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
  {
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                     & ~(word)(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    char * volatile result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding           */
    /* max_page_size to &etext if &etext is at a page boundary  */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
#       ifdef AO_HAVE_fetch_and_add
          /* Atomic add of zero: a write probe that cannot race with  */
          /* concurrent mutations of the location.                    */
          volatile AO_t zero = 0;
          (void)AO_fetch_and_add((volatile AO_t *)result, zero);
#       else
          /* Fallback to non-atomic fetch-and-store.  */
          char v = *result;
#         if defined(CPPCHECK)
            GC_noop1((word)&v);
#         endif
          *result = v;
#       endif
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.     */
        /* This is known to happen under Solaris 2.4 + gcc, which place */
        /* string constants in the text segment, but after etext.       */
        /* Use plan B.  Note that we now know there is a gap between    */
        /* text and data segments, so plan A brought us something.      */
        result = (char *)GC_find_limit(DATAEND, FALSE);
    }
    return((ptr_t)result);
  }
  1765. # endif
  1766. #ifdef DATASTART_USES_BSDGETDATASTART
  1767. /* Its unclear whether this should be identical to the above, or */
  1768. /* whether it should apply to non-X86 architectures. */
  1769. /* For now we don't assume that there is always an empty page after */
  1770. /* etext. But in some cases there actually seems to be slightly more. */
  1771. /* This also deals with holes between read-only data and writable data. */
  /* FreeBSD-style data-start heuristic: start right at the rounded   */
  /* etext and verify, page by page, that everything up to DATAEND is */
  /* readable under a temporary fault handler; on a fault, fall back  */
  /* to probing from DATAEND via GC_find_limit().                     */
  GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t max_page_size,
                                        ptr_t etext_addr)
  {
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                     & ~(word)(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    volatile word next_page = (text_end + (word)max_page_size - 1)
                              & ~((word)max_page_size - 1);
    volatile ptr_t result = (ptr_t)text_end;

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try reading at the address.                          */
        /* This should happen before there is another thread.   */
        for (; next_page < (word)DATAEND; next_page += (word)max_page_size)
            *(volatile char *)next_page;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* As above, we go to plan B    */
        result = GC_find_limit(DATAEND, FALSE);
    }
    return(result);
  }
  1795. #endif /* DATASTART_USES_BSDGETDATASTART */
  1796. #ifdef AMIGA
  1797. # define GC_AMIGA_DS
  1798. # include "extra/AmigaOS.c"
  1799. # undef GC_AMIGA_DS
  1800. #elif defined(OPENBSD)
  1801. /* Depending on arch alignment, there can be multiple holes */
  1802. /* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */
  1803. /* and register each region. */
  /* OpenBSD: depending on arch alignment, there can be multiple      */
  /* holes between DATASTART and DATAEND; scan the interval and       */
  /* register each mapped region separately.                          */
  void GC_register_data_segments(void)
  {
    ptr_t region_start = DATASTART;

    /* Sanity-check the bounds (the "- 1U" also rejects a NULL        */
    /* DATASTART without a compiler warning).                         */
    if ((word)region_start - 1U >= (word)DATAEND)
      ABORT_ARG2("Wrong DATASTART/END pair",
                 ": %p .. %p", (void *)region_start, (void *)DATAEND);
    for (;;) {
      ptr_t region_end = GC_find_limit_openbsd(region_start, DATAEND);

      GC_add_roots_inner(region_start, region_end, FALSE);
      if ((word)region_end >= (word)DATAEND)
        break;
      /* Skip the unmapped hole and continue with the next region.    */
      region_start = GC_skip_hole_openbsd(region_end, DATAEND);
    }
  }
  1818. # else /* !OS2 && !Windows && !AMIGA && !OPENBSD */
  1819. # if !defined(PCR) && !defined(MACOS) && defined(REDIRECT_MALLOC) \
  1820. && defined(GC_SOLARIS_THREADS)
  1821. EXTERN_C_BEGIN
  1822. extern caddr_t sbrk(int);
  1823. EXTERN_C_END
  1824. # endif
  /* Generic (non-OS2/Windows/Amiga/OpenBSD) registration of the      */
  /* static data segment(s) as GC roots, with special cases for       */
  /* Solaris-threads-with-redirected-malloc and classic MacOS.        */
  void GC_register_data_segments(void)
  {
#   if !defined(PCR) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
        /* As of Solaris 2.3, the Solaris threads implementation      */
        /* allocates the data structure for the initial thread with   */
        /* sbrk at process startup.  It needs to be scanned, so that  */
        /* we don't lose some malloc allocated data structures        */
        /* hanging from it.  We're on thin ice here ...               */
        GC_ASSERT(DATASTART);
        {
          ptr_t p = (ptr_t)sbrk(0);

          if ((word)DATASTART < (word)p)
            GC_add_roots_inner(DATASTART, p, FALSE);
        }
#     else
        if ((word)DATASTART - 1U >= (word)DATAEND) {
                      /* Subtract one to check also for NULL  */
                      /* without a compiler warning.          */
          ABORT_ARG2("Wrong DATASTART/END pair",
                     ": %p .. %p", (void *)DATASTART, (void *)DATAEND);
        }
        GC_add_roots_inner(DATASTART, DATAEND, FALSE);
#       ifdef GC_HAVE_DATAREGION2
          /* Some targets expose a second, disjoint data region.      */
          if ((word)DATASTART2 - 1U >= (word)DATAEND2)
            ABORT_ARG2("Wrong DATASTART/END2 pair",
                       ": %p .. %p", (void *)DATASTART2, (void *)DATAEND2);
          GC_add_roots_inner(DATASTART2, DATAEND2, FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#     if defined(THINK_C)
        extern void* GC_MacGetDataStart(void);

        /* globals begin above stack and end at a5. */
        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                           (ptr_t)LMGetCurrentA5(), FALSE);
#     else
#       if defined(__MWERKS__)
#         if !__POWERPC__
            extern void* GC_MacGetDataStart(void);

            /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#           if __option(far_data)
              extern void* GC_MacGetDataEnd(void);
#           endif
            /* globals begin above stack and end at a5. */
            GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                               (ptr_t)LMGetCurrentA5(), FALSE);
            /* MATTHEW: Handle Far Globals */
#           if __option(far_data)
              /* Far globals follow the QD globals: */
              GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
                                 (ptr_t)GC_MacGetDataEnd(), FALSE);
#           endif
#         else
            extern char __data_start__[], __data_end__[];

            GC_add_roots_inner((ptr_t)&__data_start__,
                               (ptr_t)&__data_end__, FALSE);
#         endif /* __POWERPC__ */
#       endif /* __MWERKS__ */
#     endif /* !THINK_C */
    }
#   endif /* MACOS */
    /* Dynamic libraries are added at every collection, since they may */
    /* change.                                                         */
  }
  1892. # endif /* !AMIGA */
  1893. # endif /* !MSWIN32 && !MSWINCE */
  1894. # endif /* !OS2 */
  1895. /*
  1896. * Auxiliary routines for obtaining memory from OS.
  1897. */
  1898. # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
  1899. && !defined(USE_WINALLOC) && !defined(MACOS) && !defined(DOS4GW) \
  1900. && !defined(NINTENDO_SWITCH) && !defined(NONSTOP) \
  1901. && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PS3) \
  1902. && !defined(SN_TARGET_PSP2) && !defined(RTEMS) && !defined(__CC_ARM)
  1903. # define SBRK_ARG_T ptrdiff_t
  1904. #if defined(MMAP_SUPPORTED)
  1905. #ifdef USE_MMAP_FIXED
  1906. # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
  1907. /* Seems to yield better performance on Solaris 2, but can */
  1908. /* be unreliable if something is already mapped at the address. */
  1909. #else
  1910. # define GC_MMAP_FLAGS MAP_PRIVATE
  1911. #endif
  1912. #ifdef USE_MMAP_ANON
  1913. # define zero_fd -1
  1914. # if defined(MAP_ANONYMOUS) && !defined(CPPCHECK)
  1915. # define OPT_MAP_ANON MAP_ANONYMOUS
  1916. # else
  1917. # define OPT_MAP_ANON MAP_ANON
  1918. # endif
  1919. #else
  1920. static int zero_fd = -1;
  1921. # define OPT_MAP_ANON 0
  1922. #endif
  1923. # ifndef MSWIN_XBOX1
  1924. # if defined(SYMBIAN) && !defined(USE_MMAP_ANON)
  1925. EXTERN_C_BEGIN
  1926. extern char *GC_get_private_path_and_zero_file(void);
  1927. EXTERN_C_END
  1928. # endif
  /* Obtain a page-aligned block of "bytes" bytes (which must be a    */
  /* multiple of the page size) from the OS via mmap().  Returns 0 on */
  /* failure.  A hint address is advanced past each successful        */
  /* mapping to spread mappings out.                                  */
  STATIC ptr_t GC_unix_mmap_get_mem(size_t bytes)
  {
    void *result;
    static ptr_t last_addr = HEAP_START;

#   ifndef USE_MMAP_ANON
      static GC_bool initialized = FALSE;

      /* Lazily open the file backing anonymous-like mappings         */
      /* (/dev/zero, or a private zero file on Symbian).              */
      if (!EXPECT(initialized, TRUE)) {
#       ifdef SYMBIAN
          char *path = GC_get_private_path_and_zero_file();
          if (path != NULL) {
            zero_fd = open(path, O_RDWR | O_CREAT, 0666);
            free(path);
          }
#       else
          zero_fd = open("/dev/zero", O_RDONLY);
#       endif
          if (zero_fd == -1)
            ABORT("Could not open /dev/zero");
          /* Keep the descriptor from leaking into exec'ed children.  */
          if (fcntl(zero_fd, F_SETFD, FD_CLOEXEC) == -1)
            WARN("Could not set FD_CLOEXEC for /dev/zero\n", 0);
          initialized = TRUE;
      }
#   endif

    if (bytes & (GC_page_size - 1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, (PROT_READ | PROT_WRITE)
                                    | (GC_pages_executable ? PROT_EXEC : 0),
                  GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
#   undef IGNORE_PAGES_EXECUTABLE

    if (result == MAP_FAILED) return(0);
    last_addr = (ptr_t)(((word)result + bytes + GC_page_size - 1)
                        & ~(GC_page_size - 1));
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't    */
        /* usable by arbitrary C code, since one-past-end pointers    */
        /* don't work, so we discard it and try again.                */
        munmap(result, ~GC_page_size - (size_t)result + 1);
                        /* Leave last page mapped, so we can't repeat. */
        return GC_unix_mmap_get_mem(bytes);
      }
#   else
      GC_ASSERT(last_addr != 0);
#   endif
    /* The heap code relies on HBLKSIZE alignment of mmap results.    */
    if (((word)result % HBLKSIZE) != 0)
      ABORT(
       "GC_unix_get_mem: Memory returned by mmap is not aligned to HBLKSIZE.");
    return((ptr_t)result);
  }
  1977. # endif /* !MSWIN_XBOX1 */
  1978. #endif /* MMAP_SUPPORTED */
  1979. #if defined(USE_MMAP)
/* Get heap memory from the OS; with USE_MMAP, simply delegate to     */
/* the mmap-based allocator.                                          */
ptr_t GC_unix_get_mem(size_t bytes)
{
    return GC_unix_mmap_get_mem(bytes);
}
  1984. #else /* !USE_MMAP */
/* Obtain "bytes" bytes from the OS via sbrk().  The break is first   */
/* rounded up to a page boundary.  Returns 0 on failure (including    */
/* a request too large to represent as SBRK_ARG_T).                   */
STATIC ptr_t GC_unix_sbrk_get_mem(size_t bytes)
{
  ptr_t result;
# ifdef IRIX5
    /* Bare sbrk isn't thread safe.  Play by malloc rules.      */
    /* The equivalent may be needed on other systems as well.   */
    __LOCK_MALLOC();
# endif
  {
    ptr_t cur_brk = (ptr_t)sbrk(0);
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);

    if ((SBRK_ARG_T)bytes < 0) {
        result = 0; /* too big */
        goto out;
    }
    if (lsbs != 0) {
        /* Advance the break to the next page boundary first.         */
        if((ptr_t)sbrk((SBRK_ARG_T)GC_page_size - lsbs) == (ptr_t)(-1)) {
            result = 0;
            goto out;
        }
    }
#   ifdef ADD_HEAP_GUARD_PAGES
      /* This is useful for catching severe memory overwrite problems that */
      /* span heap sections.  It shouldn't otherwise be turned on.         */
      {
        ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);
        if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
            ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
      }
#   endif /* ADD_HEAP_GUARD_PAGES */
    result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
    if (result == (ptr_t)(-1)) result = 0;
  }
 out:
# ifdef IRIX5
    __UNLOCK_MALLOC();
# endif
  return(result);
}
  2024. ptr_t GC_unix_get_mem(size_t bytes)
  2025. {
  2026. # if defined(MMAP_SUPPORTED)
  2027. /* By default, we try both sbrk and mmap, in that order. */
  2028. static GC_bool sbrk_failed = FALSE;
  2029. ptr_t result = 0;
  2030. if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
  2031. if (0 == result) {
  2032. sbrk_failed = TRUE;
  2033. result = GC_unix_mmap_get_mem(bytes);
  2034. }
  2035. if (0 == result) {
  2036. /* Try sbrk again, in case sbrk memory became available. */
  2037. result = GC_unix_sbrk_get_mem(bytes);
  2038. }
  2039. return result;
  2040. # else /* !MMAP_SUPPORTED */
  2041. return GC_unix_sbrk_get_mem(bytes);
  2042. # endif
  2043. }
  2044. #endif /* !USE_MMAP */
  2045. # endif /* UN*X */
  2046. # ifdef OS2
  2047. void * os2_alloc(size_t bytes)
  2048. {
  2049. void * result;
  2050. if (DosAllocMem(&result, bytes, (PAG_READ | PAG_WRITE | PAG_COMMIT)
  2051. | (GC_pages_executable ? PAG_EXECUTE : 0))
  2052. != NO_ERROR) {
  2053. return(0);
  2054. }
  2055. /* FIXME: What's the purpose of this recursion? (Probably, if */
  2056. /* DosAllocMem returns memory at 0 address then just retry once.) */
  2057. if (result == 0) return(os2_alloc(bytes));
  2058. return(result);
  2059. }
  2060. # endif /* OS2 */
  2061. # ifdef MSWIN_XBOX1
  2062. void *durango_get_mem(size_t bytes, size_t page_size)
  2063. {
  2064. if (0 == bytes) return NULL;
  2065. return VirtualAlloc(NULL, bytes, MEM_COMMIT | MEM_TOP_DOWN,
  2066. PAGE_READWRITE);
  2067. }
  2068. #endif
  2069. #ifdef MSWINCE
/* Allocate a heap section on WinCE.  First tries to commit further     */
/* pages from an already-reserved region; if none has room, reserves a  */
/* new region (rounded up to the allocation granularity) and commits    */
/* from it.  Returns NULL on failure.                                   */
ptr_t GC_wince_get_mem(size_t bytes)
{
    ptr_t result = 0; /* initialized to prevent warning. */
    word i;

    bytes = ROUNDUP_PAGESIZE(bytes);

    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
        /* Remaining uncommitted space in region i, modulo granularity. */
        if (((word)(-(signed_word)GC_heap_lengths[i])
             & (GC_sysinfo.dwAllocationGranularity-1))
            >= bytes) {
            result = GC_heap_bases[i] + GC_heap_lengths[i];
            break;
        }
    }

    if (i == GC_n_heap_bases) {
        /* Reserve more pages */
        size_t res_bytes =
            SIZET_SAT_ADD(bytes, (size_t)GC_sysinfo.dwAllocationGranularity-1)
            & ~((size_t)GC_sysinfo.dwAllocationGranularity-1);
        /* If we ever support MPROTECT_VDB here, we will probably need to    */
        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
        /* never spans regions.  It seems to be OK for a VirtualFree         */
        /* argument to span regions, so we should be OK for now.             */
        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
                                      MEM_RESERVE | MEM_TOP_DOWN,
                                      GC_pages_executable ? PAGE_EXECUTE_READWRITE :
                                                            PAGE_READWRITE);
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
            /* If I read the documentation correctly, this can      */
            /* only happen if HBLKSIZE > 64k or not a power of 2.   */
        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
        if (result == NULL) return NULL;
        /* Record the fresh region; nothing committed in it yet.        */
        GC_heap_bases[GC_n_heap_bases] = result;
        GC_heap_lengths[GC_n_heap_bases] = 0;
        GC_n_heap_bases++;
    }

    /* Commit pages */
    result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
                                  GC_pages_executable ? PAGE_EXECUTE_READWRITE :
                                                        PAGE_READWRITE);
#   undef IGNORE_PAGES_EXECUTABLE

    if (result != NULL) {
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        /* i still indexes the region we committed from (the new region */
        /* when one was just reserved).                                 */
        GC_heap_lengths[i] += bytes;
    }
    return(result);
}
  2117. #elif (defined(USE_WINALLOC) && !defined(MSWIN_XBOX1)) || defined(CYGWIN32)
  /* GLOBAL_ALLOC_TEST is nonzero when GlobalAlloc (rather than         */
  /* VirtualAlloc) should be used to obtain heap sections.              */
# ifdef USE_GLOBAL_ALLOC
#   define GLOBAL_ALLOC_TEST 1
# else
#   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
# endif

# if (defined(GC_USE_MEM_TOP_DOWN) && defined(USE_WINALLOC)) \
     || defined(CPPCHECK)
    DWORD GC_mem_top_down = MEM_TOP_DOWN;
                          /* Use GC_USE_MEM_TOP_DOWN for better 64-bit  */
                          /* testing.  Otherwise all addresses tend to  */
                          /* end up in first 4GB, hiding bugs.          */
# else
#   define GC_mem_top_down 0
# endif /* !GC_USE_MEM_TOP_DOWN */
/* Allocate a new heap section on Win32/Cygwin.  Uses GlobalAlloc when  */
/* GLOBAL_ALLOC_TEST requires it, VirtualAlloc otherwise (or the Unix   */
/* allocator when USE_WINALLOC is not defined).  Records the section    */
/* in GC_heap_bases.  Returns 0 on failure.                             */
ptr_t GC_win32_get_mem(size_t bytes)
{
    ptr_t result;

# ifndef USE_WINALLOC
    result = GC_unix_get_mem(bytes);
# else
#   if defined(MSWIN32) && !defined(MSWINRT_FLAVOR)
      if (GLOBAL_ALLOC_TEST) {
        /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.    */
        /* There are also unconfirmed rumors of other           */
        /* problems, so we dodge the issue.                     */
        result = (ptr_t)GlobalAlloc(0, SIZET_SAT_ADD(bytes, HBLKSIZE));
        /* Align it at HBLKSIZE boundary (we over-allocated by  */
        /* HBLKSIZE above to leave room for this).              */
        result = (ptr_t)(((word)result + HBLKSIZE - 1)
                         & ~(word)(HBLKSIZE - 1));
      } else
#   endif
    /* else */ {
        /* VirtualProtect only works on regions returned by a   */
        /* single VirtualAlloc call.  Thus we allocate one      */
        /* extra page, which will prevent merging of blocks     */
        /* in separate regions, and eliminate any temptation    */
        /* to call VirtualProtect on a range spanning regions.  */
        /* This wastes a small amount of memory, and risks      */
        /* increased fragmentation.  But better alternatives    */
        /* would require effort.                                */
#       ifdef MPROTECT_VDB
          /* We can't check for GC_incremental here (because    */
          /* GC_enable_incremental() might be called some time  */
          /* later after the GC initialization).                */
#         ifdef GWW_VDB
#           define VIRTUAL_ALLOC_PAD (GC_GWW_AVAILABLE() ? 0 : 1)
#         else
#           define VIRTUAL_ALLOC_PAD 1
#         endif
#       else
#         define VIRTUAL_ALLOC_PAD 0
#       endif
        /* Pass the MEM_WRITE_WATCH only if GetWriteWatch-based */
        /* VDBs are enabled and the GetWriteWatch function is   */
        /* available.  Otherwise we waste resources or possibly */
        /* cause VirtualAlloc to fail (observed in Windows 2000 */
        /* SP2).                                                */
        result = (ptr_t) VirtualAlloc(NULL,
                            SIZET_SAT_ADD(bytes, VIRTUAL_ALLOC_PAD),
                            GetWriteWatch_alloc_flag
                                | (MEM_COMMIT | MEM_RESERVE)
                                | GC_mem_top_down,
                            GC_pages_executable ? PAGE_EXECUTE_READWRITE :
                                                  PAGE_READWRITE);
#       undef IGNORE_PAGES_EXECUTABLE
    }
# endif /* USE_WINALLOC */
    if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        /* If I read the documentation correctly, this can      */
        /* only happen if HBLKSIZE > 64k or not a power of 2.   */
    if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
    if (0 != result) GC_heap_bases[GC_n_heap_bases++] = result;
    return(result);
}
/* Release all heap sections recorded in GC_heap_bases.  Sections       */
/* obtained via GlobalAlloc are returned with GlobalFree, VirtualAlloc  */
/* sections with VirtualFree.  Note: under Cygwin the first branch runs */
/* unconditionally and only clears the table (see FIXME below).         */
GC_API void GC_CALL GC_win32_free_heap(void)
{
# ifndef MSWINRT_FLAVOR
#   ifndef CYGWIN32
      if (GLOBAL_ALLOC_TEST)
#   endif
    {
      while (GC_n_heap_bases-- > 0) {
#       ifdef CYGWIN32
          /* FIXME: Is it OK to use non-GC free() here? */
#       else
          GlobalFree(GC_heap_bases[GC_n_heap_bases]);
#       endif
        GC_heap_bases[GC_n_heap_bases] = 0;
      }
      return;
    }
# endif
# ifndef CYGWIN32
    /* Avoiding VirtualAlloc leak.  */
    while (GC_n_heap_bases > 0) {
      VirtualFree(GC_heap_bases[--GC_n_heap_bases], 0, MEM_RELEASE);
      GC_heap_bases[GC_n_heap_bases] = 0;
    }
# endif
}
  2218. #endif /* USE_WINALLOC || CYGWIN32 */
  2219. #ifdef AMIGA
  2220. # define GC_AMIGA_AM
  2221. # include "extra/AmigaOS.c"
  2222. # undef GC_AMIGA_AM
  2223. #endif
  2224. #if defined(HAIKU)
  2225. # include <stdlib.h>
  2226. ptr_t GC_haiku_get_mem(size_t bytes)
  2227. {
  2228. void* mem;
  2229. GC_ASSERT(GC_page_size != 0);
  2230. if (posix_memalign(&mem, GC_page_size, bytes) == 0)
  2231. return mem;
  2232. return NULL;
  2233. }
  2234. #endif /* HAIKU */
  2235. #ifdef USE_MUNMAP
  2236. /* For now, this only works on Win32/WinCE and some Unix-like */
  2237. /* systems. If you have something else, don't define */
  2238. /* USE_MUNMAP. */
  2239. #if !defined(NN_PLATFORM_CTR) && !defined(MSWIN32) && !defined(MSWINCE) \
  2240. && !defined(MSWIN_XBOX1)
  2241. # include <unistd.h>
  2242. # ifdef SN_TARGET_PS3
  2243. # include <sys/memory.h>
  2244. # else
  2245. # include <sys/mman.h>
  2246. # endif
  2247. # include <sys/stat.h>
  2248. # include <sys/types.h>
  2249. #endif
  2250. /* Compute a page aligned starting address for the unmap */
  2251. /* operation on a block of size bytes starting at start. */
  2252. /* Return 0 if the block is too small to make this feasible. */
  2253. STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
  2254. {
  2255. ptr_t result = (ptr_t)(((word)start + GC_page_size - 1)
  2256. & ~(GC_page_size - 1));
  2257. if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
  2258. return result;
  2259. }
  2260. /* Compute end address for an unmap operation on the indicated */
  2261. /* block. */
  2262. STATIC ptr_t GC_unmap_end(ptr_t start, size_t bytes)
  2263. {
  2264. return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1));
  2265. }
/* Under Win32/WinCE we commit (map) and decommit (unmap)       */
/* memory using VirtualAlloc and VirtualFree.  These functions  */
/* work on individual allocations of virtual memory, made       */
/* previously using VirtualAlloc with the MEM_RESERVE flag.     */
/* The ranges we need to (de)commit may span several of these   */
/* allocations; therefore we use VirtualQuery to check          */
/* allocation lengths, and split up the range as necessary.     */

/* We assume that GC_remap is called on exactly the same range  */
/* as a previous call to GC_unmap.  It is safe to consistently  */
/* round the endpoints in both places.                          */

/* Return the page-aligned interior of [start, start+bytes) to the OS,  */
/* adding its length to GC_unmapped_bytes.  No-op when the block does   */
/* not cover a full page.                                               */
GC_INNER void GC_unmap(ptr_t start, size_t bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

    if (0 == start_addr) return;
# ifdef USE_WINALLOC
    while (len != 0) {
        /* VirtualFree operates within a single VirtualAlloc region;    */
        /* query each region's size and decommit piecewise.             */
        MEMORY_BASIC_INFORMATION mem_info;
        word free_len;

        if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
            != sizeof(mem_info))
            ABORT("Weird VirtualQuery result");
        free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
        if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
            ABORT("VirtualFree failed");
        GC_unmapped_bytes += free_len;
        start_addr += free_len;
        len -= free_len;
    }
# elif defined(SN_TARGET_PS3)
    ps3_free_mem(start_addr, len);
# else
    /* We immediately remap it to prevent an intervening mmap from      */
    /* accidentally grabbing the same address space.                    */
    {
#     ifdef CYGWIN32
        /* Calling mmap() with the new protection flags on an           */
        /* existing memory map with MAP_FIXED is broken on Cygwin.      */
        /* However, calling mprotect() on the given address range       */
        /* with PROT_NONE seems to work fine.                           */
        if (mprotect(start_addr, len, PROT_NONE))
          ABORT("mprotect(PROT_NONE) failed");
#     else
        void * result = mmap(start_addr, len, PROT_NONE,
                             MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                             zero_fd, 0/* offset */);

        if (result != (void *)start_addr)
          ABORT("mmap(PROT_NONE) failed");
#       if defined(CPPCHECK) || defined(LINT2)
          /* Explicitly store the resource handle to a global variable. */
          GC_noop1((word)result);
#       endif
#     endif /* !CYGWIN32 */
    }
    GC_unmapped_bytes += len;
# endif
}
/* Re-establish read/write access to the page-aligned interior of       */
/* [start, start+bytes), which must match a prior GC_unmap call, and    */
/* subtract its length from GC_unmapped_bytes.  Aborts on failure.      */
GC_INNER void GC_remap(ptr_t start, size_t bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

    if (0 == start_addr) return;

    /* FIXME: Handle out-of-memory correctly (at least for Win32) */
# ifdef USE_WINALLOC
    while (len != 0) {
        /* Commit region by region, as in GC_unmap.                     */
        MEMORY_BASIC_INFORMATION mem_info;
        word alloc_len;
        ptr_t result;

        if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
            != sizeof(mem_info))
            ABORT("Weird VirtualQuery result");
        alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
        result = VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
                              GC_pages_executable ? PAGE_EXECUTE_READWRITE :
                                                    PAGE_READWRITE);
        if (result != start_addr) {
            if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY ||
                GetLastError() == ERROR_OUTOFMEMORY) {
                ABORT("Not enough memory to process remapping");
            } else {
                ABORT("VirtualAlloc remapping failed");
            }
        }
#       ifdef LINT2
          GC_noop1((word)result);
#       endif
        GC_unmapped_bytes -= alloc_len;
        start_addr += alloc_len;
        len -= alloc_len;
    }
# else
    /* It was already remapped with PROT_NONE. */
    {
#     ifdef NACL
        /* NaCl does not expose mprotect, but mmap should work fine.    */
        void *result = mmap(start_addr, len, (PROT_READ | PROT_WRITE)
                            | (GC_pages_executable ? PROT_EXEC : 0),
                            MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                            zero_fd, 0 /* offset */);

        if (result != (void *)start_addr)
          ABORT("mmap as mprotect failed");
#       if defined(CPPCHECK) || defined(LINT2)
          GC_noop1((word)result);
#       endif
#     else
        if (mprotect(start_addr, len, (PROT_READ | PROT_WRITE)
                     | (GC_pages_executable ? PROT_EXEC : 0)) != 0) {
          ABORT_ARG3("mprotect remapping failed",
                     " at %p (length %lu), errcode= %d",
                     (void *)start_addr, (unsigned long)len, errno);
        }
#     endif /* !NACL */
    }
#   undef IGNORE_PAGES_EXECUTABLE
    GC_unmapped_bytes -= len;
# endif
}
/* Two adjacent blocks have already been unmapped and are about to      */
/* be merged.  Unmap the whole block.  This typically requires          */
/* that we unmap a small section in the middle that was not previously  */
/* unmapped due to alignment constraints.                               */
GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
                           size_t bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    size_t len;

    GC_ASSERT(start1 + bytes1 == start2);
    /* If either block was too small to be unmapped on its own, extend  */
    /* the gap over the combined block's rounded bounds.                */
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
# ifdef USE_WINALLOC
    while (len != 0) {
        MEMORY_BASIC_INFORMATION mem_info;
        word free_len;

        if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
            != sizeof(mem_info))
            ABORT("Weird VirtualQuery result");
        free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
        if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
            ABORT("VirtualFree failed");
        GC_unmapped_bytes += free_len;
        start_addr += free_len;
        len -= free_len;
    }
# else
    if (len != 0) {
        /* Immediately remap as above (prevents an intervening mmap     */
        /* from grabbing the address space).                            */
#     ifdef CYGWIN32
        if (mprotect(start_addr, len, PROT_NONE))
          ABORT("mprotect(PROT_NONE) failed");
#     else
        void * result = mmap(start_addr, len, PROT_NONE,
                             MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                             zero_fd, 0/* offset */);

        if (result != (void *)start_addr)
          ABORT("mmap(PROT_NONE) failed");
#       if defined(CPPCHECK) || defined(LINT2)
          GC_noop1((word)result);
#       endif
#     endif /* !CYGWIN32 */
        GC_unmapped_bytes += len;
    }
# endif
}
  2437. #endif /* USE_MUNMAP */
/* Routine for pushing any additional roots.  In THREADS        */
/* environment, this is also responsible for marking from       */
/* thread stacks.                                               */
#ifndef THREADS
  /* Single-threaded build: no extra roots are pushed by default. */
  GC_push_other_roots_proc GC_push_other_roots = 0;
#else /* THREADS */
# ifdef PCR
    /* Push one PCR thread's stack.  Used as a callback for             */
    /* PCR_ThCtl_ApplyToAllOtherThreads below; returns the status of    */
    /* the thread-info query.                                           */
    PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
    {
      struct PCR_ThCtl_TInfoRep info;
      PCR_ERes result;

      /* Zero the bounds first so they are defined even if the query    */
      /* fails.                                                         */
      info.ti_stkLow = info.ti_stkHi = 0;
      result = PCR_ThCtl_GetInfo(t, &info);
      GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
      return(result);
    }

    /* Push the contents of an old object.  We treat this as stack      */
    /* data only because that makes it robust against mark stack        */
    /* overflow.                                                        */
    PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
    {
      GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
      return(PCR_ERes_okay);
    }

    extern struct PCR_MM_ProcsRep * GC_old_allocator;
                                        /* defined in pcr_interface.c.  */

    /* PCR default: push objects of previous memory managers, then all  */
    /* thread stacks (including the current thread's).                  */
    STATIC void GC_CALLBACK GC_default_push_other_roots(void)
    {
      /* Traverse data allocated by previous memory managers.           */
      if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
                                               GC_push_old_obj, 0)
          != PCR_ERes_okay) {
          ABORT("Old object enumeration failed");
      }
      /* Traverse all thread stacks. */
      if (PCR_ERes_IsErr(
                PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
          || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
          ABORT("Thread stack marking failed");
      }
    }
# endif /* PCR */
# if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
     || defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
    /* Default for the usual thread packages: push every thread stack.  */
    STATIC void GC_CALLBACK GC_default_push_other_roots(void)
    {
      GC_push_all_stacks();
    }
# endif
# ifdef SN_TARGET_PS3
    /* PS3 stubs: multi-threaded root pushing is not implemented there. */
    STATIC void GC_CALLBACK GC_default_push_other_roots(void)
    {
      ABORT("GC_default_push_other_roots is not implemented");
    }

    void GC_push_thread_structures(void)
    {
      ABORT("GC_push_thread_structures is not implemented");
    }
# endif /* SN_TARGET_PS3 */

  /* THREADS builds start out with the platform default installed.      */
  GC_push_other_roots_proc GC_push_other_roots = GC_default_push_other_roots;
#endif /* THREADS */
/* Install a client-supplied routine for pushing additional roots.      */
GC_API void GC_CALL GC_set_push_other_roots(GC_push_other_roots_proc fn)
{
    GC_push_other_roots = fn;
}

/* Return the currently installed additional-roots routine.             */
GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
{
    return GC_push_other_roots;
}
/* Restore GC_push_other_roots to its build-time default (the platform  */
/* default routine in THREADS builds, none otherwise).                  */
void GC_reset_default_push_other_roots(void)
{
#ifdef THREADS
    GC_push_other_roots = GC_default_push_other_roots;
#else
    GC_push_other_roots = 0;
#endif
}
/* Callback invoked when the mark stack becomes empty.                  */
/* NOTE(review): declared with type GC_push_other_roots_proc while the  */
/* accessors below use GC_mark_stack_empty_proc -- presumably the two   */
/* typedefs share a signature; confirm against the public headers.      */
GC_push_other_roots_proc GC_on_mark_stack_empty;

/* Install a client callback to run when the mark stack empties.        */
GC_API void GC_CALL GC_set_mark_stack_empty (GC_mark_stack_empty_proc fn)
{
    GC_on_mark_stack_empty = fn;
}

/* Return the currently installed mark-stack-empty callback.            */
GC_API GC_mark_stack_empty_proc GC_CALL GC_get_mark_stack_empty (void)
{
    return GC_on_mark_stack_empty;
}
  2524. /*
  2525. * Routines for accessing dirty bits on virtual pages.
  2526. * There are six ways to maintain this information:
  2527. * DEFAULT_VDB: A simple dummy implementation that treats every page
  2528. * as possibly dirty. This makes incremental collection
  2529. * useless, but the implementation is still correct.
  2530. * MANUAL_VDB: Stacks and static data are always considered dirty.
  2531. * Heap pages are considered dirty if GC_dirty(p) has been
  2532. * called on some pointer p pointing to somewhere inside
  2533. * an object on that page. A GC_dirty() call on a large
  2534. * object directly dirties only a single page, but for
  2535. * MANUAL_VDB we are careful to treat an object with a dirty
  2536. * page as completely dirty.
  2537. * In order to avoid races, an object must be marked dirty
  2538. * after it is written, and a reference to the object
  2539. * must be kept on a stack or in a register in the interim.
  2540. * With threads enabled, an object directly reachable from the
  2541. * stack at the time of a collection is treated as dirty.
  2542. * In single-threaded mode, it suffices to ensure that no
  2543. * collection can take place between the pointer assignment
  2544. * and the GC_dirty() call.
 * PCR_VDB:     Use PPCR's virtual dirty bit facility.
  2546. * PROC_VDB: Use the /proc facility for reading dirty bits. Only
  2547. * works under some SVR4 variants. Even then, it may be
  2548. * too slow to be entirely satisfactory. Requires reading
  2549. * dirty bits for entire address space. Implementations tend
  2550. * to assume that the client is a (slow) debugger.
  2551. * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
  2552. * dirtied pages. The implementation (and implementability)
  2553. * is highly system dependent. This usually fails when system
  2554. * calls write to a protected page. We prevent the read system
 *              call from doing so.  It is the client's responsibility to
  2556. * make sure that other system calls are similarly protected
  2557. * or write only to the stack.
  2558. * GWW_VDB: Use the Win32 GetWriteWatch functions, if available, to
  2559. * read dirty bits. In case it is not available (because we
  2560. * are running on Windows 95, Windows 2000 or earlier),
  2561. * MPROTECT_VDB may be defined as a fallback strategy.
  2562. */
  2563. #if defined(GWW_VDB) || defined(MPROTECT_VDB) || defined(PROC_VDB) \
  2564. || defined(MANUAL_VDB)
  2565. /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
  2566. /* If the actual page size is different, this returns TRUE if any */
  2567. /* of the pages overlapping h are dirty. This routine may err on the */
  2568. /* side of labeling pages as dirty (and this implementation does). */
  2569. GC_INNER GC_bool GC_page_was_dirty(struct hblk * h)
  2570. {
  2571. word index;
  2572. if (HDR(h) == 0)
  2573. return TRUE;
  2574. index = PHT_HASH(h);
  2575. return get_pht_entry_from_index(GC_grungy_pages, index);
  2576. }
  2577. #endif
  2578. #if (defined(CHECKSUMS) && defined(GWW_VDB)) || defined(PROC_VDB)
  2579. /* Add all pages in pht2 to pht1. */
  2580. STATIC void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
  2581. {
  2582. unsigned i;
  2583. for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
  2584. }
  /* Used only if GWW_VDB.  Was the page at h ever written since it     */
  /* became part of the heap?  Errs on the side of TRUE.                */
# ifdef MPROTECT_VDB
    STATIC GC_bool GC_gww_page_was_ever_dirty(struct hblk * h)
# else
    GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h)
# endif
  {
    word index;

    /* Pages without a header (not managed by us) count as ever-dirty.  */
    if (HDR(h) == 0)
      return TRUE;
    index = PHT_HASH(h);
    return get_pht_entry_from_index(GC_written_pages, index);
  }
#endif /* CHECKSUMS && GWW_VDB || PROC_VDB */
#if ((defined(GWW_VDB) || defined(PROC_VDB)) && !defined(MPROTECT_VDB)) \
    || defined(MANUAL_VDB) || defined(DEFAULT_VDB)
  /* Ignore write hints.  They don't help us here.      */
  /* These VDB strategies do not page-protect the heap, so there is     */
  /* no protection to remove; this is deliberately a no-op.             */
  GC_INNER void GC_remove_protection(struct hblk * h GC_ATTR_UNUSED,
                                     word nblocks GC_ATTR_UNUSED,
                                     GC_bool is_ptrfree GC_ATTR_UNUSED) {}
#endif
  2606. #ifdef GWW_VDB
# define GC_GWW_BUF_LEN (MAXHINCR * HBLKSIZE / 4096 /* X86 page size */)
  /* Still susceptible to overflow, if there are very large allocations, */
  /* and everything is dirty.                                            */
  /* Scratch buffer receiving written-page addresses from GetWriteWatch. */
  static PVOID gww_buf[GC_GWW_BUF_LEN];

# ifndef MPROTECT_VDB
    /* Without an MPROTECT_VDB fallback, the GWW initializer serves as   */
    /* the dirty-bits initializer directly.                              */
#   define GC_gww_dirty_init GC_dirty_init
# endif

  /* Probe for GetWriteWatch; returns TRUE iff it is available.          */
  GC_INNER GC_bool GC_gww_dirty_init(void)
  {
    detect_GetWriteWatch();
    return GC_GWW_AVAILABLE();
  }
  /* Collect dirty bits for the whole heap via GetWriteWatch into        */
  /* GC_grungy_pages (unless output_unneeded), resetting the OS's        */
  /* write-watch state as a side effect.  On a GetWriteWatch failure     */
  /* the affected section is conservatively marked entirely dirty.       */
# ifdef MPROTECT_VDB
    STATIC void GC_gww_read_dirty(GC_bool output_unneeded)
# else
    GC_INNER void GC_read_dirty(GC_bool output_unneeded)
# endif
  {
    word i;

    if (!output_unneeded)
      BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));

    for (i = 0; i != GC_n_heap_sects; ++i) {
      GC_ULONG_PTR count;

      do {
        PVOID * pages = gww_buf;
        DWORD page_size;

        count = GC_GWW_BUF_LEN;
        /* GetWriteWatch is documented as returning non-zero when it    */
        /* fails, but the documentation doesn't explicitly say why it   */
        /* would fail or what its behaviour will be if it fails.        */
        /* It does appear to fail, at least on recent W2K instances, if */
        /* the underlying memory was not allocated with the appropriate */
        /* flag.  This is common if GC_enable_incremental is called     */
        /* shortly after GC initialization.  To avoid modifying the     */
        /* interface, we silently work around such a failure, it only   */
        /* affects the initial (small) heap allocation.  If there are   */
        /* more dirty pages than will fit in the buffer, this is not    */
        /* treated as a failure; we must check the page count in the    */
        /* loop condition.  Since each partial call will reset the      */
        /* status of some pages, this should eventually terminate even  */
        /* in the overflow case.                                        */
        if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
                               GC_heap_sects[i].hs_start,
                               GC_heap_sects[i].hs_bytes,
                               pages,
                               &count,
                               &page_size) != 0) {
          static int warn_count = 0;
          struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
          static struct hblk *last_warned = 0;
          size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);

          if (i != 0 && last_warned != start && warn_count++ < 5) {
            last_warned = start;
            WARN("GC_gww_read_dirty unexpectedly failed at %p: "
                 "Falling back to marking all pages dirty\n", start);
          }
          if (!output_unneeded) {
            unsigned j;

            /* Conservatively mark every block of the section dirty.    */
            for (j = 0; j < nblocks; ++j) {
              word hash = PHT_HASH(start + j);
              set_pht_entry_from_index(GC_grungy_pages, hash);
            }
          }
          count = 1;  /* Done with this section. */
        } else /* succeeded */ if (!output_unneeded) {
          PVOID * pages_end = pages + count;

          while (pages != pages_end) {
            struct hblk * h = (struct hblk *) *pages++;
            struct hblk * h_end = (struct hblk *) ((char *) h + page_size);

            /* A reported OS page may span several heap blocks.         */
            do {
              set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
            } while ((word)(++h) < (word)h_end);
          }
        }
      } while (count == GC_GWW_BUF_LEN);
      /* FIXME: It's unclear from Microsoft's documentation if this loop */
      /* is useful.  We suspect the call just fails if the buffer fills  */
      /* up.  But that should still be handled correctly.                */
    }

#   ifdef CHECKSUMS
      GC_ASSERT(!output_unneeded);
      GC_or_pages(GC_written_pages, GC_grungy_pages);
#   endif
  }
  2691. #endif /* GWW_VDB */
#ifdef DEFAULT_VDB
  /* All of the following assume the allocation lock is held.   */

  /* The client asserts that unallocated pages in the heap are never    */
  /* written.                                                           */

  /* Initialize virtual dirty bit implementation.  Always succeeds.     */
  GC_INNER GC_bool GC_dirty_init(void)
  {
    GC_VERBOSE_LOG_PRINTF("Initializing DEFAULT_VDB...\n");
    return TRUE;
  }

  /* Retrieve system dirty bits for heap to a local buffer.     */
  /* Restore the system's notion of which pages are dirty.      */
  /* Deliberately a no-op: every page is always considered dirty.       */
  GC_INNER void GC_read_dirty(GC_bool output_unneeded GC_ATTR_UNUSED) {}

  /* Is the HBLKSIZE sized page at h marked dirty in the local buffer?  */
  /* If the actual page size is different, this returns TRUE if any     */
  /* of the pages overlapping h are dirty.  This routine may err on the */
  /* side of labeling pages as dirty (and this implementation does).    */
  GC_INNER GC_bool GC_page_was_dirty(struct hblk * h GC_ATTR_UNUSED)
  {
    return(TRUE);
  }

  /* The following two routines are typically less crucial.             */
  /* They matter most with large dynamic libraries, or if we can't      */
  /* accurately identify stacks, e.g. under Solaris 2.X.  Otherwise the */
  /* following default versions are adequate.                           */
# ifdef CHECKSUMS
    /* Could any valid GC heap pointer ever have been written to this page? */
    GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h GC_ATTR_UNUSED)
    {
      return(TRUE);
    }
# endif /* CHECKSUMS */
#endif /* DEFAULT_VDB */
  2725. #ifdef MANUAL_VDB
  2726. /* Initialize virtual dirty bit implementation. */
  2727. GC_INNER GC_bool GC_dirty_init(void)
  2728. {
  2729. GC_VERBOSE_LOG_PRINTF("Initializing MANUAL_VDB...\n");
  2730. /* GC_dirty_pages and GC_grungy_pages are already cleared. */
  2731. return TRUE;
  2732. }
  2733. /* Retrieve system dirty bits for the heap to a local buffer */
  2734. /* (unless output_unneeded). Restore the systems notion of */
  2735. /* which pages are dirty. */
  2736. GC_INNER void GC_read_dirty(GC_bool output_unneeded)
  2737. {
  2738. if (!output_unneeded)
  2739. BCOPY((word *)GC_dirty_pages, GC_grungy_pages, sizeof(GC_dirty_pages));
  2740. BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
  2741. }
  2742. #ifndef GC_DISABLE_INCREMENTAL
  2743. # ifndef THREADS
  2744. # define async_set_pht_entry_from_index(db, index) \
  2745. set_pht_entry_from_index(db, index)
  2746. # elif defined(set_pht_entry_from_index_concurrent)
  2747. # define async_set_pht_entry_from_index(db, index) \
  2748. set_pht_entry_from_index_concurrent(db, index)
  2749. # elif defined(AO_HAVE_test_and_set_acquire)
  2750. /* We need to lock around the bitmap update (in the write fault */
  2751. /* handler or GC_dirty) in order to avoid the risk of losing a bit. */
  2752. /* We do this with a test-and-set spin lock if possible. */
  2753. GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
  2754. static void async_set_pht_entry_from_index(volatile page_hash_table db,
  2755. size_t index)
  2756. {
  2757. GC_acquire_dirty_lock();
  2758. set_pht_entry_from_index(db, index);
  2759. GC_release_dirty_lock();
  2760. }
  2761. # else
  2762. # error No test_and_set operation: Introduces a race.
  2763. # endif /* THREADS && !AO_HAVE_test_and_set_acquire */
  2764. #else
  2765. # define async_set_pht_entry_from_index(db, index)
  2766. #endif /* !GC_DISABLE_INCREMENTAL */
  2767. /* Mark the page containing p as dirty. Logically, this dirties the */
  2768. /* entire object. */
  2769. #if !IL2CPP_ENABLE_WRITE_BARRIER_VALIDATION
  2770. GC_API void GC_dirty_inner(const void *p)
  2771. {
  2772. word index = PHT_HASH(p);
  2773. async_set_pht_entry_from_index(GC_dirty_pages, index);
  2774. }
  2775. #endif
  2776. # ifdef CHECKSUMS
  2777. /* Could any valid GC heap pointer ever have been written to this page? */
GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h GC_ATTR_UNUSED)
{
  /* FIXME - implement me.  MANUAL_VDB keeps no history, so answer      */
  /* conservatively for now.                                            */
  return(TRUE);
}
  2783. # endif /* CHECKSUMS */
  2784. #endif /* MANUAL_VDB */
  2785. #ifdef MPROTECT_VDB
  2786. /* See DEFAULT_VDB for interface descriptions. */
  2787. /*
  2788. * This implementation maintains dirty bits itself by catching write
  2789. * faults and keeping track of them. We assume nobody else catches
  2790. * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
  2791. * This means that clients must ensure that system calls don't write
  2792. * to the write-protected heap. Probably the best way to do this is to
  2793. * ensure that system calls write at most to pointer-free objects in the
  2794. * heap, and do even that only if we are on a platform on which those
  2795. * are not protected. Another alternative is to wrap system calls
* (see example for read below), but the current implementation no longer
* wraps read by default, since that was causing too many problems.
  2798. * We assume the page size is a multiple of HBLKSIZE.
  2799. * We prefer them to be the same. We avoid protecting pointer-free
  2800. * objects only if they are the same.
  2801. */
  2802. # ifdef DARWIN
  2803. /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
  2804. decrease the likelihood of some of the problems described below. */
  2805. # include <mach/vm_map.h>
  2806. STATIC mach_port_t GC_task_self = 0;
  2807. # define PROTECT(addr,len) \
  2808. if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
  2809. FALSE, VM_PROT_READ \
  2810. | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
  2811. == KERN_SUCCESS) {} else ABORT("vm_protect(PROTECT) failed")
  2812. # define UNPROTECT(addr,len) \
  2813. if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
  2814. FALSE, (VM_PROT_READ | VM_PROT_WRITE) \
  2815. | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
  2816. == KERN_SUCCESS) {} else ABORT("vm_protect(UNPROTECT) failed")
  2817. # elif !defined(USE_WINALLOC)
  2818. # include <sys/mman.h>
  2819. # include <signal.h>
  2820. # if !defined(HAIKU)
  2821. # include <sys/syscall.h>
  2822. # endif
  2823. # define PROTECT(addr, len) \
  2824. if (mprotect((caddr_t)(addr), (size_t)(len), \
  2825. PROT_READ \
  2826. | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
  2827. } else ABORT("mprotect failed")
  2828. # define UNPROTECT(addr, len) \
  2829. if (mprotect((caddr_t)(addr), (size_t)(len), \
  2830. (PROT_READ | PROT_WRITE) \
  2831. | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
  2832. } else ABORT(GC_pages_executable ? \
  2833. "un-mprotect executable page failed" \
  2834. " (probably disabled by OS)" : \
  2835. "un-mprotect failed")
  2836. # undef IGNORE_PAGES_EXECUTABLE
  2837. # else /* USE_WINALLOC */
  2838. # ifndef MSWINCE
  2839. # include <signal.h>
  2840. # endif
  2841. static DWORD protect_junk;
  2842. # define PROTECT(addr, len) \
  2843. if (VirtualProtect((addr), (len), \
  2844. GC_pages_executable ? PAGE_EXECUTE_READ : \
  2845. PAGE_READONLY, \
  2846. &protect_junk)) { \
  2847. } else ABORT_ARG1("VirtualProtect failed", \
  2848. ": errcode= 0x%X", (unsigned)GetLastError())
  2849. # define UNPROTECT(addr, len) \
  2850. if (VirtualProtect((addr), (len), \
  2851. GC_pages_executable ? PAGE_EXECUTE_READWRITE : \
  2852. PAGE_READWRITE, \
  2853. &protect_junk)) { \
  2854. } else ABORT("un-VirtualProtect failed")
  2855. # endif /* USE_WINALLOC */
  2856. # if defined(MSWIN32)
  2857. typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
  2858. # undef SIG_DFL
  2859. # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER)((signed_word)-1)
  2860. # elif defined(MSWINCE)
  2861. typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
  2862. # undef SIG_DFL
  2863. # define SIG_DFL (SIG_HNDLR_PTR) (-1)
  2864. # elif defined(DARWIN)
  2865. typedef void (* SIG_HNDLR_PTR)();
  2866. # else
  2867. typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
  2868. typedef void (* PLAIN_HNDLR_PTR)(int);
  2869. # endif
  2870. # if defined(__GLIBC__)
  2871. # if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
  2872. # error glibc too old?
  2873. # endif
  2874. # endif
  2875. #ifndef DARWIN
  2876. STATIC SIG_HNDLR_PTR GC_old_segv_handler = 0;
  2877. /* Also old MSWIN32 ACCESS_VIOLATION filter */
  2878. # if !defined(MSWIN32) && !defined(MSWINCE)
  2879. STATIC SIG_HNDLR_PTR GC_old_bus_handler = 0;
  2880. # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
  2881. STATIC GC_bool GC_old_bus_handler_used_si = FALSE;
  2882. # endif
  2883. STATIC GC_bool GC_old_segv_handler_used_si = FALSE;
  2884. # endif /* !MSWIN32 */
  2885. #endif /* !DARWIN */
  2886. #ifdef THREADS
  2887. /* This function is used only by the fault handler. Potential data */
  2888. /* race between this function and GC_install_header, GC_remove_header */
  2889. /* should not be harmful because the added or removed header should */
  2890. /* be already unprotected. */
GC_ATTR_NO_SANITIZE_THREAD
static GC_bool is_header_found_async(void *addr)
{
# ifdef HASH_TL
    /* With a hashed top-level index, use the full lookup; a null       */
    /* result means addr is not inside any block the GC knows about.    */
    hdr *result;
    GET_HDR((ptr_t)addr, result);
    return result != NULL;
# else
    return HDR_INNER(addr) != NULL;
# endif
}
  2902. #else
  2903. # define is_header_found_async(addr) (HDR(addr) != NULL)
  2904. #endif /* !THREADS */
  2905. #ifndef DARWIN
  2906. # if !defined(MSWIN32) && !defined(MSWINCE)
  2907. # include <errno.h>
  2908. # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
  2909. # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
  2910. # else
  2911. # define SIG_OK (sig == SIGSEGV)
  2912. /* Catch SIGSEGV but ignore SIGBUS. */
  2913. # endif
  2914. # if defined(FREEBSD)
  2915. # ifndef SEGV_ACCERR
  2916. # define SEGV_ACCERR 2
  2917. # endif
  2918. # if defined(AARCH64) || defined(ARM32) || defined(MIPS)
  2919. # define CODE_OK (si -> si_code == SEGV_ACCERR)
  2920. # elif defined(POWERPC)
  2921. # define AIM /* Pretend that we're AIM. */
  2922. # include <machine/trap.h>
  2923. # define CODE_OK (si -> si_code == EXC_DSI \
  2924. || si -> si_code == SEGV_ACCERR)
  2925. # else
  2926. # define CODE_OK (si -> si_code == BUS_PAGE_FAULT \
  2927. || si -> si_code == SEGV_ACCERR)
  2928. # endif
  2929. # elif defined(OSF1)
  2930. # define CODE_OK (si -> si_code == 2 /* experimentally determined */)
  2931. # elif defined(IRIX5)
  2932. # define CODE_OK (si -> si_code == EACCES)
  2933. # elif defined(HAIKU) || defined(HURD)
  2934. # define CODE_OK TRUE
  2935. # elif defined(LINUX)
  2936. # define CODE_OK TRUE
  2937. /* Empirically c.trapno == 14, on IA32, but is that useful? */
  2938. /* Should probably consider alignment issues on other */
  2939. /* architectures. */
  2940. # elif defined(HPUX)
  2941. # define CODE_OK (si -> si_code == SEGV_ACCERR \
  2942. || si -> si_code == BUS_ADRERR \
  2943. || si -> si_code == BUS_UNKNOWN \
  2944. || si -> si_code == SEGV_UNKNOWN \
  2945. || si -> si_code == BUS_OBJERR)
  2946. # elif defined(SUNOS5SIGS)
  2947. # define CODE_OK (si -> si_code == SEGV_ACCERR)
  2948. # endif
  2949. # ifndef NO_GETCONTEXT
  2950. # include <ucontext.h>
  2951. # endif
STATIC void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
# else
  /* On Win32 the write fault arrives as an access-violation exception; */
  /* SIG_OK/CODE_OK test the exception record rather than a signal      */
  /* number and siginfo.                                                */
# define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
                   == STATUS_ACCESS_VIOLATION)
# define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
                    == 1) /* Write fault */
  STATIC LONG WINAPI GC_write_fault_handler(
                          struct _EXCEPTION_POINTERS *exc_info)
# endif /* MSWIN32 || MSWINCE */
{
  /* Faulting address, extracted in the platform-specific way. */
# if !defined(MSWIN32) && !defined(MSWINCE)
    char *addr = (char *)si->si_addr;
# else
    char * addr = (char *) (exc_info -> ExceptionRecord
                              -> ExceptionInformation[1]);
# endif
  if (SIG_OK && CODE_OK) {
    /* Page containing the faulting address (GC_page_size aligned). */
    struct hblk * h = (struct hblk *)((word)addr & ~(GC_page_size-1));
    GC_bool in_allocd_block;
    size_t i;

#   ifdef CHECKSUMS
      GC_record_fault(h);
#   endif
#   ifdef SUNOS5SIGS
      /* Address is only within the correct physical page, so scan      */
      /* every heap block on that page for a header.                    */
      in_allocd_block = FALSE;
      for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
        if (is_header_found_async(&h[i])) {
          in_allocd_block = TRUE;
          break;
        }
      }
#   else
      in_allocd_block = is_header_found_async(addr);
#   endif
    if (!in_allocd_block) {
      /* The fault is not in the GC heap: chain to the previously       */
      /* installed handler, or abort if there was none.                 */
      /* FIXME - We should make sure that we invoke the     */
      /* old handler with the appropriate calling           */
      /* sequence, which often depends on SA_SIGINFO.       */
      /* Heap blocks now begin and end on page boundaries   */
      SIG_HNDLR_PTR old_handler;

#     if defined(MSWIN32) || defined(MSWINCE)
        old_handler = GC_old_segv_handler;
#     else
        GC_bool used_si;

#       if defined(FREEBSD) || defined(HURD) || defined(HPUX)
          if (sig == SIGBUS) {
            old_handler = GC_old_bus_handler;
            used_si = GC_old_bus_handler_used_si;
          } else
#       endif
        /* else */ {
          old_handler = GC_old_segv_handler;
          used_si = GC_old_segv_handler_used_si;
        }
#     endif
      if (old_handler == (SIG_HNDLR_PTR)SIG_DFL) {
#       if !defined(MSWIN32) && !defined(MSWINCE)
          ABORT_ARG1("Unexpected bus error or segmentation fault",
                     " at %p", (void *)addr);
#       else
          return(EXCEPTION_CONTINUE_SEARCH);
#       endif
      } else {
        /*
         * FIXME: This code should probably check if the
         * old signal handler used the traditional style and
         * if so call it using that style.
         */
#       if defined(MSWIN32) || defined(MSWINCE)
          return((*old_handler)(exc_info));
#       else
          if (used_si)
            ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
          else
            /* FIXME: should pass nonstandard args as well. */
            ((PLAIN_HNDLR_PTR)old_handler) (sig);
          return;
#       endif
      }
    }
    UNPROTECT(h, GC_page_size);
    /* We need to make sure that no collection occurs between       */
    /* the UNPROTECT and the setting of the dirty bit.  Otherwise   */
    /* a write by a third thread might go unnoticed.  Reversing     */
    /* the order is just as bad, since we would end up unprotecting */
    /* a page in a GC cycle during which it's not marked.           */
    /* Currently we do this by disabling the thread stopping        */
    /* signals while this handler is running.  An alternative might */
    /* be to record the fact that we're about to unprotect, or      */
    /* have just unprotected a page in the GC's thread structure,   */
    /* and then to have the thread stopping code set the dirty      */
    /* flag, if necessary.                                          */
    for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
      word index = PHT_HASH(h+i);

      async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
    /* The write may not take place before dirty bits are read. */
    /* But then we'll fault again ... */
#   if defined(MSWIN32) || defined(MSWINCE)
      return(EXCEPTION_CONTINUE_EXECUTION);
#   else
      return;
#   endif
  }
  /* The signal matched but the fault code did not: not a GC-induced    */
  /* protection fault.                                                  */
# if defined(MSWIN32) || defined(MSWINCE)
    return EXCEPTION_CONTINUE_SEARCH;
# else
    ABORT_ARG1("Unexpected bus error or segmentation fault",
               " at %p", (void *)addr);
# endif
}
  3064. # ifdef GC_WIN32_THREADS
GC_INNER void GC_set_write_fault_handler(void)
{
  /* Install the collector's write-fault handler as the process-wide    */
  /* unhandled-exception filter.  NOTE(review): the previous filter is  */
  /* not saved here; GC_mprotect_dirty_init (MSWIN32 path) is the       */
  /* place that records it into GC_old_segv_handler.                    */
  SetUnhandledExceptionFilter(GC_write_fault_handler);
}
  3069. # endif
  3070. #endif /* !DARWIN */
  3071. /* We hold the allocation lock. We expect block h to be written */
  3072. /* shortly. Ensure that all pages containing any part of the n hblks */
  3073. /* starting at h are no longer protected. If is_ptrfree is false, also */
  3074. /* ensure that they will subsequently appear to be dirty. Not allowed */
  3075. /* to call GC_printf (and the friends) here, see Win32 GC_stop_world() */
  3076. /* for the information. */
GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
                                   GC_bool is_ptrfree)
{
  struct hblk * h_trunc;    /* Truncated to page boundary */
  struct hblk * h_end;      /* Page boundary following block end */
  struct hblk * current;

# if defined(GWW_VDB)
    /* GetWriteWatch-based dirty bits are in use; pages are not         */
    /* write-protected, so there is nothing to undo.                    */
    if (GC_GWW_AVAILABLE()) return;
# endif
  if (!GC_incremental) return;
  h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
  h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size - 1)
                          & ~(GC_page_size - 1));
  if (h_end == h_trunc + 1 &&
      get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
    /* already marked dirty, and hence unprotected. */
    return;
  }
  /* Mark every page we are about to unprotect as dirty, except pages   */
  /* lying entirely inside a pointer-free block (is_ptrfree), which     */
  /* need not be rescanned.                                             */
  for (current = h_trunc; (word)current < (word)h_end; ++current) {
    word index = PHT_HASH(current);

    if (!is_ptrfree || (word)current < (word)h
        || (word)current >= (word)(h + nblocks)) {
      async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
  }
  UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
}
  3104. #ifdef USE_MUNMAP
  3105. /* MPROTECT_VDB cannot deal with address space holes (for now), */
  3106. /* so if the collector is configured with both MPROTECT_VDB and */
  3107. /* USE_MUNMAP then, as a work around, select only one of them */
  3108. /* during GC_init or GC_enable_incremental. */
  3109. GC_INNER GC_bool GC_dirty_init(void)
  3110. {
  3111. if (GC_unmap_threshold != 0) {
  3112. if (GETENV("GC_UNMAP_THRESHOLD") != NULL
  3113. || GETENV("GC_FORCE_UNMAP_ON_GCOLLECT") != NULL
  3114. || GC_has_unmapped_memory()) {
  3115. WARN("Can't maintain mprotect-based dirty bits"
  3116. " in case of unmapping\n", 0);
  3117. return FALSE;
  3118. }
  3119. GC_unmap_threshold = 0; /* in favor of incremental collection */
  3120. WARN("Memory unmapping is disabled as incompatible"
  3121. " with MPROTECT_VDB\n", 0);
  3122. }
  3123. return GC_mprotect_dirty_init();
  3124. }
  3125. #else
  3126. # define GC_mprotect_dirty_init GC_dirty_init
  3127. #endif /* !USE_MUNMAP */
  3128. #if !defined(DARWIN)
/* Install the write-fault handler(s) and verify configuration for the  */
/* mprotect-based dirty-bit implementation.  Returns TRUE on success.   */
GC_INNER GC_bool GC_mprotect_dirty_init(void)
{
# if !defined(MSWIN32) && !defined(MSWINCE)
    struct sigaction act, oldact;

    act.sa_flags = SA_RESTART | SA_SIGINFO;
    act.sa_sigaction = GC_write_fault_handler;
    (void)sigemptyset(&act.sa_mask);
#   if defined(THREADS) && !defined(GC_OPENBSD_UTHREADS) \
       && !defined(GC_WIN32_THREADS) && !defined(NACL)
      /* Arrange to postpone the signal while we are in a write fault */
      /* handler.  This effectively makes the handler atomic w.r.t.   */
      /* stopping the world for GC.                                   */
      (void)sigaddset(&act.sa_mask, GC_get_suspend_signal());
#   endif
# endif /* !MSWIN32 */
  GC_VERBOSE_LOG_PRINTF(
                "Initializing mprotect virtual dirty bit implementation\n");
  if (GC_page_size % HBLKSIZE != 0) {
    ABORT("Page size not multiple of HBLKSIZE");
  }
# if !defined(MSWIN32) && !defined(MSWINCE)
    /* act.sa_restorer is deprecated and should not be initialized. */
#   if defined(GC_IRIX_THREADS)
      sigaction(SIGSEGV, 0, &oldact);
      sigaction(SIGSEGV, &act, 0);
#   else
      {
        int res = sigaction(SIGSEGV, &act, &oldact);
        if (res != 0) ABORT("Sigaction failed");
      }
#   endif
    /* Remember the previous SIGSEGV handler and whether it expected    */
    /* the three-argument (SA_SIGINFO) calling convention.              */
    if (oldact.sa_flags & SA_SIGINFO) {
      GC_old_segv_handler = oldact.sa_sigaction;
      GC_old_segv_handler_used_si = TRUE;
    } else {
      GC_old_segv_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
      GC_old_segv_handler_used_si = FALSE;
    }
    if (GC_old_segv_handler == (SIG_HNDLR_PTR)SIG_IGN) {
      WARN("Previously ignored segmentation violation!?\n", 0);
      GC_old_segv_handler = (SIG_HNDLR_PTR)SIG_DFL;
    }
    if (GC_old_segv_handler != (SIG_HNDLR_PTR)SIG_DFL) {
      GC_VERBOSE_LOG_PRINTF("Replaced other SIGSEGV handler\n");
    }
#   if defined(HPUX) || defined(LINUX) || defined(HURD) \
       || (defined(FREEBSD) && (defined(__GLIBC__) || defined(SUNOS5SIGS)))
      /* Also install the handler for SIGBUS on these platforms.        */
      sigaction(SIGBUS, &act, &oldact);
      if ((oldact.sa_flags & SA_SIGINFO) != 0) {
        GC_old_bus_handler = oldact.sa_sigaction;
#       if !defined(LINUX)
          GC_old_bus_handler_used_si = TRUE;
#       endif
      } else {
        GC_old_bus_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
#       if !defined(LINUX)
          GC_old_bus_handler_used_si = FALSE;
#       endif
      }
      if (GC_old_bus_handler == (SIG_HNDLR_PTR)SIG_IGN) {
        WARN("Previously ignored bus error!?\n", 0);
#       if !defined(LINUX)
          GC_old_bus_handler = (SIG_HNDLR_PTR)SIG_DFL;
#       else
          /* GC_old_bus_handler is not used by GC_write_fault_handler. */
#       endif
      } else if (GC_old_bus_handler != (SIG_HNDLR_PTR)SIG_DFL) {
        GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
      }
#   endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
# endif /* ! MS windows */
# if defined(GWW_VDB)
    /* Prefer the GetWriteWatch-based implementation when available. */
    if (GC_gww_dirty_init())
      return TRUE;
# endif
# if defined(MSWIN32)
    GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
    if (GC_old_segv_handler != NULL) {
      GC_COND_LOG_PRINTF("Replaced other UnhandledExceptionFilter\n");
    } else {
      GC_old_segv_handler = SIG_DFL;
    }
# elif defined(MSWINCE)
    /* MPROTECT_VDB is unsupported for WinCE at present.        */
    /* FIXME: implement it (if possible).                       */
# endif
# if defined(CPPCHECK) && defined(ADDRESS_SANITIZER)
    GC_noop1((word)&__asan_default_options);
# endif
  return TRUE;
}
  3220. #endif /* !DARWIN */
  3221. GC_API int GC_CALL GC_incremental_protection_needs(void)
  3222. {
  3223. GC_ASSERT(GC_is_initialized);
  3224. if (GC_page_size == HBLKSIZE) {
  3225. return GC_PROTECTS_POINTER_HEAP;
  3226. } else {
  3227. return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
  3228. }
  3229. }
  3230. #define HAVE_INCREMENTAL_PROTECTION_NEEDS
  3231. #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
  3232. #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
/* Write-protect the heap so that subsequent writes fault and are       */
/* recorded as dirty.  Pointer-free and free blocks are skipped when    */
/* the configuration allows it (page size == HBLKSIZE).                 */
STATIC void GC_protect_heap(void)
{
  unsigned i;
  GC_bool protect_all =
        (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));

  for (i = 0; i < GC_n_heap_sects; i++) {
    ptr_t start = GC_heap_sects[i].hs_start;
    size_t len = GC_heap_sects[i].hs_bytes;

    if (protect_all) {
      PROTECT(start, len);
    } else {
      /* Walk the section block-by-block, accumulating maximal runs of  */
      /* pointer-containing blocks and protecting each run in one call. */
      struct hblk * current;
      struct hblk * current_start; /* Start of block to be protected. */
      struct hblk * limit;

      GC_ASSERT(PAGE_ALIGNED(len));
      GC_ASSERT(PAGE_ALIGNED(start));
      current_start = current = (struct hblk *)start;
      limit = (struct hblk *)(start + len);
      while ((word)current < (word)limit) {
        hdr * hhdr;
        word nhblks;
        GC_bool is_ptrfree;

        GC_ASSERT(PAGE_ALIGNED(current));
        GET_HDR(current, hhdr);
        if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
          /* This can happen only if we're at the beginning of a  */
          /* heap segment, and a block spans heap segments.       */
          /* We will handle that block as part of the preceding   */
          /* segment.                                             */
          GC_ASSERT(current_start == current);
          current_start = ++current;
          continue;
        }
        if (HBLK_IS_FREE(hhdr)) {
          GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
          nhblks = divHBLKSZ(hhdr -> hb_sz);
          is_ptrfree = TRUE; /* dirty on alloc */
        } else {
          nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
          is_ptrfree = IS_PTRFREE(hhdr);
        }
        if (is_ptrfree) {
          /* Flush the pending run, then skip this pointer-free block. */
          if ((word)current_start < (word)current) {
            PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
          }
          current_start = (current += nhblks);
        } else {
          current += nhblks;
        }
      }
      /* Flush the final run, if any. */
      if ((word)current_start < (word)current) {
        PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
      }
    }
  }
}
  3289. /* We assume that either the world is stopped or its OK to lose dirty */
  3290. /* bits while this is happening (as in GC_enable_incremental). */
GC_INNER void GC_read_dirty(GC_bool output_unneeded)
{
# if defined(GWW_VDB)
    /* Delegate to the GetWriteWatch-based implementation if active. */
    if (GC_GWW_AVAILABLE()) {
      GC_gww_read_dirty(output_unneeded);
      return;
    }
# endif
  /* Copy out the fault-handler-maintained dirty set (unless the caller */
  /* does not need it), clear it, and re-protect the heap so that       */
  /* future writes fault again.                                         */
  if (!output_unneeded)
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages, sizeof(GC_dirty_pages));
  BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
  GC_protect_heap();
}
  3304. /*
  3305. * Acquiring the allocation lock here is dangerous, since this
  3306. * can be called from within GC_call_with_alloc_lock, and the cord
  3307. * package does so. On systems that allow nested lock acquisition, this
  3308. * happens to work.
  3309. */
  3310. /* We no longer wrap read by default, since that was causing too many */
  3311. /* problems. It is preferred that the client instead avoids writing */
  3312. /* to the write-protected heap with a system call. */
  3313. # ifdef CHECKSUMS
GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h GC_ATTR_UNUSED)
{
# if defined(GWW_VDB)
    /* GetWriteWatch tracking retains history; ask it. */
    if (GC_GWW_AVAILABLE())
      return GC_gww_page_was_ever_dirty(h);
# endif
  /* The mprotect implementation keeps no history; be conservative. */
  return(TRUE);
}
  3322. # endif /* CHECKSUMS */
  3323. #endif /* MPROTECT_VDB */
  3324. #ifdef PROC_VDB
  3325. /* See DEFAULT_VDB for interface descriptions. */
  3326. /* This implementation assumes a Solaris 2.X like /proc */
  3327. /* pseudo-file-system from which we can read page modified bits. This */
  3328. /* facility is far from optimal (e.g. we would like to get the info for */
  3329. /* only some of the address space), but it avoids intercepting system */
  3330. /* calls. */
  3331. # include <errno.h>
  3332. # include <sys/types.h>
  3333. # include <sys/signal.h>
  3334. # include <sys/syscall.h>
  3335. # include <sys/stat.h>
  3336. # ifdef GC_NO_SYS_FAULT_H
  3337. /* This exists only to check PROC_VDB code compilation (on Linux). */
  3338. # define PG_MODIFIED 1
/* Minimal stand-in for the Solaris /proc pagedata header; layout       */
/* mirrors the real prpageheader well enough for compile checking.      */
struct prpageheader {
  int dummy[2]; /* pr_tstamp */
  unsigned long pr_nmap;   /* number of mappings described below */
  unsigned long pr_npage;  /* total number of pages */
};
/* Minimal stand-in for the Solaris /proc per-mapping header (compile   */
/* checking only); one of these precedes each run of per-page flags.    */
struct prasmap {
  char *pr_vaddr;        /* starting virtual address of the mapping */
  size_t pr_npage;       /* number of pages in the mapping */
  char dummy1[64+8]; /* pr_mapname, pr_offset */
  unsigned pr_mflags;
  unsigned pr_pagesize;  /* page size used by this mapping */
  int dummy2[2];
};
  3352. # else
  3353. # include <sys/fault.h>
  3354. # include <sys/procfs.h>
  3355. # endif
  3356. # define INITIAL_BUF_SZ 16384
  3357. STATIC size_t GC_proc_buf_size = INITIAL_BUF_SZ;
  3358. STATIC char *GC_proc_buf = NULL;
  3359. STATIC int GC_proc_fd = 0;
/* Open the /proc pagedata pseudo-file and allocate the read buffer.    */
/* Returns FALSE (leaving incremental mode off) if /proc is unusable.   */
GC_INNER GC_bool GC_dirty_init(void)
{
  char buf[40];

  if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
    /* Allocation happened before tracking started: we cannot know      */
    /* which pages were touched, so mark every page as written.         */
    memset(GC_written_pages, 0xff, sizeof(page_hash_table));
    GC_VERBOSE_LOG_PRINTF(
                "Allocated %lu bytes: all pages may have been written\n",
                (unsigned long)(GC_bytes_allocd + GC_bytes_allocd_before_gc));
  }
  (void)snprintf(buf, sizeof(buf), "/proc/%ld/pagedata", (long)getpid());
  buf[sizeof(buf) - 1] = '\0';
  GC_proc_fd = open(buf, O_RDONLY);
  if (GC_proc_fd < 0) {
    WARN("/proc open failed; cannot enable GC incremental mode\n", 0);
    return FALSE;
  }
  /* Keep the descriptor from leaking into child processes; the raw    */
  /* syscall is used here (presumably to bypass fcntl interposition).  */
  if (syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC) == -1)
    WARN("Could not set FD_CLOEXEC for /proc\n", 0);
  GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
  if (GC_proc_buf == NULL)
    ABORT("Insufficient space for /proc read");
  return TRUE;
}
  3383. # define READ read
/* Read the kernel's page-modified bits from /proc pagedata into        */
/* GC_grungy_pages and fold them into GC_written_pages.                 */
GC_INNER void GC_read_dirty(GC_bool output_unneeded)
{
  int nmaps;
  char * bufp = GC_proc_buf;
  int i;

  BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
  if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
    /* Retry with larger buffer. */
    size_t new_size = 2 * GC_proc_buf_size;
    char *new_buf;

    WARN("/proc read failed: GC_proc_buf_size = %" WARN_PRIdPTR "\n",
         (signed_word)GC_proc_buf_size);
    new_buf = GC_scratch_alloc(new_size);
    if (new_buf != 0) {
      GC_scratch_recycle_no_gww(bufp, GC_proc_buf_size);
      GC_proc_buf = bufp = new_buf;
      GC_proc_buf_size = new_size;
    }
    if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
      WARN("Insufficient space for /proc read\n", 0);
      /* Punt: treat every page as both dirty and written, the          */
      /* conservative (safe) answer.                                    */
      if (!output_unneeded)
        memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
      memset(GC_written_pages, 0xff, sizeof(page_hash_table));
      return;
    }
  }
  /* Copy dirty bits into GC_grungy_pages */
  nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
# ifdef DEBUG_DIRTY_BITS
    GC_log_printf("Proc VDB read: pr_nmap= %u, pr_npage= %lu\n",
                  nmaps, ((struct prpageheader *)bufp)->pr_npage);
# endif
# if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK)
    GC_noop1(((struct prpageheader *)bufp)->dummy[0]);
# endif
  bufp += sizeof(struct prpageheader);
  for (i = 0; i < nmaps; i++) {
    /* Each mapping: one prasmap header followed by one flag byte per   */
    /* page of the mapping.                                             */
    struct prasmap * map = (struct prasmap *)bufp;
    ptr_t vaddr = (ptr_t)(map -> pr_vaddr);
    unsigned long npages = map -> pr_npage;
    unsigned pagesize = map -> pr_pagesize;
    ptr_t limit;

#   if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK)
      GC_noop1(map->dummy1[0] + map->dummy2[0]);
#   endif
#   ifdef DEBUG_DIRTY_BITS
      GC_log_printf(
                "pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
                (void *)vaddr, npages, map->pr_mflags, pagesize);
#   endif
    bufp += sizeof(struct prasmap);
    limit = vaddr + pagesize * npages;
    for (; (word)vaddr < (word)limit; vaddr += pagesize) {
      if ((*bufp++) & PG_MODIFIED) {
        struct hblk * h;
        ptr_t next_vaddr = vaddr + pagesize;

#       ifdef DEBUG_DIRTY_BITS
          GC_log_printf("dirty page at: %p\n", (void *)vaddr);
#       endif
        /* A modified system page may cover several heap blocks; mark   */
        /* every block it contains.                                     */
        for (h = (struct hblk *)vaddr;
             (word)h < (word)next_vaddr; h++) {
          word index = PHT_HASH(h);

          set_pht_entry_from_index(GC_grungy_pages, index);
        }
      }
    }
    /* The next mapping header is aligned to a long boundary. */
    bufp = (char *)(((word)bufp + (sizeof(long)-1))
                    & ~(word)(sizeof(long)-1));
  }
# ifdef DEBUG_DIRTY_BITS
    GC_log_printf("Proc VDB read done\n");
# endif
  /* Update GC_written_pages (even if output_unneeded). */
  GC_or_pages(GC_written_pages, GC_grungy_pages);
}
  3460. # undef READ
  3461. #endif /* PROC_VDB */
  3462. #ifdef PCR_VDB
  3463. # include "vd/PCR_VD.h"
  3464. # define NPAGES (32*1024) /* 128 MB */
  3465. PCR_VD_DB GC_grungy_bits[NPAGES];
  3466. STATIC ptr_t GC_vd_base = NULL;
  3467. /* Address corresponding to GC_grungy_bits[0] */
  3468. /* HBLKSIZE aligned. */
  3469. GC_INNER GC_bool GC_dirty_init(void)
  3470. {
  3471. /* For the time being, we assume the heap generally grows up */
  3472. GC_vd_base = GC_heap_sects[0].hs_start;
  3473. if (GC_vd_base == 0) {
  3474. ABORT("Bad initial heap segment");
  3475. }
  3476. if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
  3477. != PCR_ERes_okay) {
  3478. ABORT("Dirty bit initialization failed");
  3479. }
  3480. return TRUE;
  3481. }
  3482. GC_INNER void GC_read_dirty(GC_bool output_unneeded GC_ATTR_UNUSED)
  3483. {
  3484. /* lazily enable dirty bits on newly added heap sects */
  3485. {
  3486. static int onhs = 0;
  3487. int nhs = GC_n_heap_sects;
  3488. for(; onhs < nhs; onhs++) {
  3489. PCR_VD_WriteProtectEnable(
  3490. GC_heap_sects[onhs].hs_start,
  3491. GC_heap_sects[onhs].hs_bytes );
  3492. }
  3493. }
  3494. if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
  3495. != PCR_ERes_okay) {
  3496. ABORT("Dirty bit read failed");
  3497. }
  3498. }
  3499. GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
  3500. {
  3501. if ((word)h < (word)GC_vd_base
  3502. || (word)h >= (word)(GC_vd_base + NPAGES*HBLKSIZE)) {
  3503. return(TRUE);
  3504. }
  3505. return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
  3506. }
  3507. GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
  3508. GC_bool is_ptrfree GC_ATTR_UNUSED)
  3509. {
  3510. PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
  3511. PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
  3512. }
  3513. #endif /* PCR_VDB */
  3514. #if defined(MPROTECT_VDB) && defined(DARWIN)
  3515. /* The following sources were used as a "reference" for this exception
  3516. handling code:
  3517. 1. Apple's mach/xnu documentation
  3518. 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
  3519. omnigroup's macosx-dev list.
  3520. www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
  3521. 3. macosx-nat.c from Apple's GDB source code.
  3522. */
  3523. /* The bug that caused all this trouble should now be fixed. This should
  3524. eventually be removed if all goes well. */
  3525. /* #define BROKEN_EXCEPTION_HANDLING */
  3526. #include <mach/mach.h>
  3527. #include <mach/mach_error.h>
  3528. #include <mach/exception.h>
  3529. #include <mach/task.h>
  3530. #include <pthread.h>
  3531. EXTERN_C_BEGIN
  3532. /* Some of the following prototypes are missing in any header, although */
  3533. /* they are documented. Some are in mach/exc.h file. */
  3534. extern boolean_t
  3535. exc_server(mach_msg_header_t *, mach_msg_header_t *);
  3536. extern kern_return_t
  3537. exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
  3538. exception_data_t, mach_msg_type_number_t);
  3539. extern kern_return_t
  3540. exception_raise_state(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
  3541. exception_data_t, mach_msg_type_number_t,
  3542. thread_state_flavor_t*, thread_state_t,
  3543. mach_msg_type_number_t, thread_state_t,
  3544. mach_msg_type_number_t*);
  3545. extern kern_return_t
  3546. exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t,
  3547. exception_type_t, exception_data_t,
  3548. mach_msg_type_number_t, thread_state_flavor_t*,
  3549. thread_state_t, mach_msg_type_number_t,
  3550. thread_state_t, mach_msg_type_number_t*);
  3551. GC_API_OSCALL kern_return_t
  3552. catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
  3553. mach_port_t task, exception_type_t exception,
  3554. exception_data_t code,
  3555. mach_msg_type_number_t code_count);
  3556. GC_API_OSCALL kern_return_t
  3557. catch_exception_raise_state(mach_port_name_t exception_port,
  3558. int exception, exception_data_t code,
  3559. mach_msg_type_number_t codeCnt, int flavor,
  3560. thread_state_t old_state, int old_stateCnt,
  3561. thread_state_t new_state, int new_stateCnt);
  3562. GC_API_OSCALL kern_return_t
  3563. catch_exception_raise_state_identity(mach_port_name_t exception_port,
  3564. mach_port_t thread, mach_port_t task, int exception,
  3565. exception_data_t code, mach_msg_type_number_t codeCnt,
  3566. int flavor, thread_state_t old_state, int old_stateCnt,
  3567. thread_state_t new_state, int new_stateCnt);
  3568. EXTERN_C_END
  3569. /* These should never be called, but just in case... */
  3570. GC_API_OSCALL kern_return_t
  3571. catch_exception_raise_state(mach_port_name_t exception_port GC_ATTR_UNUSED,
  3572. int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
  3573. mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
  3574. thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
  3575. thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
  3576. {
  3577. ABORT_RET("Unexpected catch_exception_raise_state invocation");
  3578. return(KERN_INVALID_ARGUMENT);
  3579. }
  3580. GC_API_OSCALL kern_return_t
  3581. catch_exception_raise_state_identity(
  3582. mach_port_name_t exception_port GC_ATTR_UNUSED,
  3583. mach_port_t thread GC_ATTR_UNUSED, mach_port_t task GC_ATTR_UNUSED,
  3584. int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
  3585. mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
  3586. thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
  3587. thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
  3588. {
  3589. ABORT_RET("Unexpected catch_exception_raise_state_identity invocation");
  3590. return(KERN_INVALID_ARGUMENT);
  3591. }
#define MAX_EXCEPTION_PORTS 16

/* The task's EXC_BAD_ACCESS exception ports as they were before we    */
/* installed ours.  Filled by task_get_exception_ports() in            */
/* GC_mprotect_dirty_init(); consulted by GC_forward_exception() to    */
/* pass on faults the collector does not own.                          */
static struct {
  mach_msg_type_number_t count;
  exception_mask_t masks[MAX_EXCEPTION_PORTS];
  exception_handler_t ports[MAX_EXCEPTION_PORTS];
  exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
  thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
} GC_old_exc_ports;
/* Mach ports owned by the collector.  The os_callback array only      */
/* stores pointers to the catch_* routines so the linker/strip cannot  */
/* remove them as dead code; it is never invoked.                      */
STATIC struct ports_s {
  void (*volatile os_callback[3])(void);
  mach_port_t exception;        /* receives EXC_BAD_ACCESS messages    */
# if defined(THREADS)
    mach_port_t reply;          /* stop/resume handshake with the      */
                                /* handler thread                      */
# endif
} GC_ports = {
  {
    /* This is to prevent stripping these routines as dead.    */
    (void (*)(void))catch_exception_raise,
    (void (*)(void))catch_exception_raise_state,
    (void (*)(void))catch_exception_raise_state_identity
  },
# ifdef THREADS
    0, /* for 'exception' */
# endif
  0
};
/* A bare Mach message (header only); the stop/resume notifications    */
/* carry no payload beyond msgh_id.                                    */
typedef struct {
  mach_msg_header_t head;
} GC_msg_t;

/* Operating mode of the exception-handling thread.                    */
typedef enum {
  GC_MP_NORMAL,         /* handling faults as they arrive              */
  GC_MP_DISCARDING,     /* stop requested; draining queued faults      */
  GC_MP_STOPPED         /* world stopped; faults left pending          */
} GC_mprotect_state_t;
#ifdef THREADS
  /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field, but it */
  /* is not documented.  Use the source and see if they should be OK.   */
# define ID_STOP 1
# define ID_RESUME 2

  /* This value is only used on the reply port.  */
# define ID_ACK 3

  /* Current mode of the exception-handling thread.                    */
  STATIC GC_mprotect_state_t GC_mprotect_state = GC_MP_NORMAL;
/* Send an ID_STOP or ID_RESUME request to the handler thread via the  */
/* exception port and block until its ID_ACK arrives on the reply      */
/* port.  The following should ONLY be called when the world is        */
/* stopped.                                                            */
STATIC void GC_mprotect_thread_notify(mach_msg_id_t id)
{
  struct buf_s {
    GC_msg_t msg;
    mach_msg_trailer_t trailer;     /* room for the kernel trailer     */
  } buf;
  mach_msg_return_t r;

  /* remote, local */
  buf.msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
  buf.msg.head.msgh_size = sizeof(buf.msg);
  buf.msg.head.msgh_remote_port = GC_ports.exception;
  buf.msg.head.msgh_local_port = MACH_PORT_NULL;
  buf.msg.head.msgh_id = id;

  /* Combined send+receive: post the request, then wait for the ack.   */
  r = mach_msg(&buf.msg.head, MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_LARGE,
               sizeof(buf.msg), sizeof(buf), GC_ports.reply,
               MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  if (r != MACH_MSG_SUCCESS)
    ABORT("mach_msg failed in GC_mprotect_thread_notify");
  if (buf.msg.head.msgh_id != ID_ACK)
    ABORT("Invalid ack in GC_mprotect_thread_notify");
}
/* Acknowledge a stop/resume request by sending ID_ACK on the reply    */
/* port.  Should only be called by the mprotect thread.                */
STATIC void GC_mprotect_thread_reply(void)
{
  GC_msg_t msg;
  mach_msg_return_t r;

  /* remote, local */
  msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
  msg.head.msgh_size = sizeof(msg);
  msg.head.msgh_remote_port = GC_ports.reply;
  msg.head.msgh_local_port = MACH_PORT_NULL;
  msg.head.msgh_id = ID_ACK;

  /* Send only; GC_mprotect_thread_notify() is blocked receiving.      */
  r = mach_msg(&msg.head, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
               MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  if (r != MACH_MSG_SUCCESS)
    ABORT("mach_msg failed in GC_mprotect_thread_reply");
}
/* Switch the handler thread into discard mode (world-stop path).      */
GC_INNER void GC_mprotect_stop(void)
{
  GC_mprotect_thread_notify(ID_STOP);
}

/* Return the handler thread to normal fault handling.                 */
GC_INNER void GC_mprotect_resume(void)
{
  GC_mprotect_thread_notify(ID_RESUME);
}

#else
  /* The compiler should optimize away any GC_mprotect_state computations */
# define GC_mprotect_state GC_MP_NORMAL
#endif /* !THREADS */
/* Body of the dedicated exception-handling thread.  Loops forever     */
/* receiving on GC_ports.exception: protection faults are dispatched   */
/* through exc_server() (which calls catch_exception_raise), while     */
/* ID_STOP/ID_RESUME requests from GC_mprotect_thread_notify() drive   */
/* the GC_mprotect_state machine.  Never returns normally.             */
STATIC void *GC_mprotect_thread(void *arg)
{
  mach_msg_return_t r;
  /* These two structures contain some private kernel data.  We don't  */
  /* need to access any of it so we don't bother defining a proper     */
  /* struct.  The correct definitions are in the xnu source code.      */
  struct reply_s {
    mach_msg_header_t head;
    char data[256];
  } reply;
  struct msg_s {
    mach_msg_header_t head;
    mach_msg_body_t msgh_body;
    char data[1024];
  } msg;
  mach_msg_id_t id;

  if ((word)arg == (word)-1) return 0; /* to make compiler happy */
# if defined(CPPCHECK)
    reply.data[0] = 0; /* to prevent "field unused" warnings */
    msg.data[0] = 0;
# endif
# if defined(THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
    GC_darwin_register_mach_handler_thread(mach_thread_self());
# endif

  for(;;) {
    /* In discard mode poll with a zero timeout so that draining the   */
    /* queue ends as soon as it is empty.                              */
    r = mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE |
                 (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
                 0, sizeof(msg), GC_ports.exception,
                 GC_mprotect_state == GC_MP_DISCARDING ? 0
                 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;

#   if defined(THREADS)
      if(GC_mprotect_state == GC_MP_DISCARDING) {
        if(r == MACH_RCV_TIMED_OUT) {
          /* Queue drained: transition to STOPPED and acknowledge the  */
          /* pending GC_mprotect_stop().                               */
          GC_mprotect_state = GC_MP_STOPPED;
          GC_mprotect_thread_reply();
          continue;
        }
        if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
          ABORT("Out of order mprotect thread request");
      }
#   endif /* THREADS */

    if (r != MACH_MSG_SUCCESS) {
      ABORT_ARG2("mach_msg failed",
                 ": errcode= %d (%s)", (int)r, mach_error_string(r));
    }

    switch(id) {
#     if defined(THREADS)
        case ID_STOP:
          if(GC_mprotect_state != GC_MP_NORMAL)
            ABORT("Called mprotect_stop when state wasn't normal");
          GC_mprotect_state = GC_MP_DISCARDING;
          break;
        case ID_RESUME:
          if(GC_mprotect_state != GC_MP_STOPPED)
            ABORT("Called mprotect_resume when state wasn't stopped");
          GC_mprotect_state = GC_MP_NORMAL;
          GC_mprotect_thread_reply();
          break;
#     endif /* THREADS */
      default:
        /* Handle the message (calls catch_exception_raise) */
        if(!exc_server(&msg.head, &reply.head))
          ABORT("exc_server failed");
        /* Send the reply */
        r = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
                     MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
                     MACH_PORT_NULL);
        if(r != MACH_MSG_SUCCESS) {
          /* This will fail if the thread dies, but the thread */
          /* shouldn't die... */
#         ifdef BROKEN_EXCEPTION_HANDLING
            GC_err_printf("mach_msg failed with %d %s while sending "
                          "exc reply\n", (int)r, mach_error_string(r));
#         else
            ABORT("mach_msg failed while sending exception reply");
#         endif
        }
    } /* switch */
  } /* for(;;) */
}
/* All this SIGBUS code shouldn't be necessary.  All protection faults
   should be going through the mach exception handler.  However, it seems
   a SIGBUS is occasionally sent for some unknown reason.  Even more odd,
   it seems to be meaningless and safe to ignore. */
#ifdef BROKEN_EXCEPTION_HANDLING

  /* Updates to this aren't atomic, but the SIGBUS'es seem pretty rare.    */
  /* Even if this doesn't get updated properly, it isn't really a problem. */
  STATIC int GC_sigbus_count = 0;

  /* Handler for the spurious SIGBUS signals: ignore isolated ones but   */
  /* abort after 8 in a row (a real fault would keep re-faulting and hit */
  /* the limit quickly).  The counter is reset each time a mach          */
  /* exception is handled in catch_exception_raise.                      */
  STATIC void GC_darwin_sigbus(int num, siginfo_t *sip, void *context)
  {
    if (num != SIGBUS)
      ABORT("Got a non-sigbus signal in the sigbus handler");

    /* Ugh... some seem safe to ignore, but too many in a row probably means
       trouble.  GC_sigbus_count is reset for each mach exception that is
       handled. */
    if (GC_sigbus_count >= 8) {
      ABORT("Got more than 8 SIGBUSs in a row!");
    } else {
      GC_sigbus_count++;
      WARN("Ignoring SIGBUS\n", 0);
    }
  }
#endif /* BROKEN_EXCEPTION_HANDLING */
/* Set up the Darwin/Mach mprotect-based virtual-dirty-bit machinery:  */
/* allocate the exception (and, with THREADS, reply) port, take over   */
/* the task's EXC_BAD_ACCESS handling while saving the previous ports  */
/* for forwarding, and start the dedicated handler thread.  Returns    */
/* TRUE on success, FALSE if incremental mode cannot be enabled.       */
GC_INNER GC_bool GC_mprotect_dirty_init(void)
{
  kern_return_t r;
  mach_port_t me;
  pthread_t thread;
  pthread_attr_t attr;
  exception_mask_t mask;

# ifdef CAN_HANDLE_FORK
    if (GC_handle_fork) {
      /* To both support GC incremental mode and GC functions usage in  */
      /* the forked child, pthread_atfork should be used to install     */
      /* handlers that switch off GC_incremental in the child           */
      /* gracefully (unprotecting all pages and clearing                */
      /* GC_mach_handler_thread).  For now, we just disable incremental */
      /* mode if fork() handling is requested by the client.            */
      WARN("Can't turn on GC incremental mode as fork()"
           " handling requested\n", 0);
      return FALSE;
    }
# endif

  GC_VERBOSE_LOG_PRINTF("Initializing mach/darwin mprotect"
                        " virtual dirty bit implementation\n");
# ifdef BROKEN_EXCEPTION_HANDLING
    WARN("Enabling workarounds for various darwin "
         "exception handling bugs\n", 0);
# endif
  if (GC_page_size % HBLKSIZE != 0) {
    ABORT("Page size not multiple of HBLKSIZE");
  }

  GC_task_self = me = mach_task_self();
  r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.exception);
  /* TODO: WARN and return FALSE in case of a failure. */
  if (r != KERN_SUCCESS)
    ABORT("mach_port_allocate failed (exception port)");

  /* The exception port also needs a send right: the kernel (and       */
  /* GC_mprotect_thread_notify) will send messages to it.              */
  r = mach_port_insert_right(me, GC_ports.exception, GC_ports.exception,
                             MACH_MSG_TYPE_MAKE_SEND);
  if (r != KERN_SUCCESS)
    ABORT("mach_port_insert_right failed (exception port)");

# if defined(THREADS)
    r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.reply);
    if(r != KERN_SUCCESS)
      ABORT("mach_port_allocate failed (reply port)");
# endif

  /* The exceptions we want to catch */
  mask = EXC_MASK_BAD_ACCESS;
  /* Save the old handlers so GC_forward_exception can delegate faults */
  /* the collector does not own.                                       */
  r = task_get_exception_ports(me, mask, GC_old_exc_ports.masks,
                               &GC_old_exc_ports.count, GC_old_exc_ports.ports,
                               GC_old_exc_ports.behaviors,
                               GC_old_exc_ports.flavors);
  if (r != KERN_SUCCESS)
    ABORT("task_get_exception_ports failed");

  r = task_set_exception_ports(me, mask, GC_ports.exception, EXCEPTION_DEFAULT,
                               GC_MACH_THREAD_STATE);
  if (r != KERN_SUCCESS)
    ABORT("task_set_exception_ports failed");

  if (pthread_attr_init(&attr) != 0)
    ABORT("pthread_attr_init failed");
  if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
    ABORT("pthread_attr_setdetachedstate failed");

# undef pthread_create
  /* This will call the real pthread function, not our wrapper */
  if (pthread_create(&thread, &attr, GC_mprotect_thread, NULL) != 0)
    ABORT("pthread_create failed");
  (void)pthread_attr_destroy(&attr);

  /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
# ifdef BROKEN_EXCEPTION_HANDLING
    {
      struct sigaction sa, oldsa;

      sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART|SA_SIGINFO;
      /* sa.sa_restorer is deprecated and should not be initialized. */
      if (sigaction(SIGBUS, &sa, &oldsa) < 0)
        ABORT("sigaction failed");
      if ((SIG_HNDLR_PTR)oldsa.sa_handler != SIG_DFL) {
        GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
      }
    }
# endif /* BROKEN_EXCEPTION_HANDLING */
  return TRUE;
}
/* The source code for Apple's GDB was used as a reference for the     */
/* exception forwarding code.  This code is similar to the GDB code    */
/* only because there is only one way to do it.                        */

/* Re-raise an exception we are not interested in at the handler that  */
/* was installed before ours (saved in GC_old_exc_ports), honoring     */
/* whichever behavior and thread-state flavor it registered with.      */
STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task,
                                          exception_type_t exception,
                                          exception_data_t data,
                                          mach_msg_type_number_t data_count)
{
  unsigned int i;
  kern_return_t r;
  mach_port_t port;
  exception_behavior_t behavior;
  thread_state_flavor_t flavor;
  thread_state_data_t thread_state;
  mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;

  /* Find the saved port whose mask covers this exception type.        */
  for (i=0; i < GC_old_exc_ports.count; i++)
    if (GC_old_exc_ports.masks[i] & (1 << exception))
      break;
  if (i == GC_old_exc_ports.count)
    ABORT("No handler for exception!");

  port = GC_old_exc_ports.ports[i];
  behavior = GC_old_exc_ports.behaviors[i];
  flavor = GC_old_exc_ports.flavors[i];

  if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
    /* State-carrying behaviors need the thread state passed in (and   */
    /* possibly modified by) the old handler.                          */
    r = thread_get_state(thread, flavor, thread_state, &thread_state_count);
    if(r != KERN_SUCCESS)
      ABORT("thread_get_state failed in forward_exception");
  }

  switch(behavior) {
    case EXCEPTION_STATE:
      r = exception_raise_state(port, thread, task, exception, data, data_count,
                                &flavor, thread_state, thread_state_count,
                                thread_state, &thread_state_count);
      break;
    case EXCEPTION_STATE_IDENTITY:
      r = exception_raise_state_identity(port, thread, task, exception, data,
                                         data_count, &flavor, thread_state,
                                         thread_state_count, thread_state,
                                         &thread_state_count);
      break;
    /* case EXCEPTION_DEFAULT: */ /* default signal handlers */
    default: /* user-supplied signal handlers */
      r = exception_raise(port, thread, task, exception, data, data_count);
  }

  if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
    /* Install any state changes the old handler made.                 */
    r = thread_set_state(thread, flavor, thread_state, thread_state_count);
    if (r != KERN_SUCCESS)
      ABORT("thread_set_state failed in forward_exception");
  }
  return r;
}
/* Forward the current exception to the previously-installed handler.  */
#define FWD() GC_forward_exception(thread, task, exception, code, code_count)

/* Per-architecture selection of the "exception state" thread-state    */
/* flavor and of the field within it that holds the faulting data      */
/* address (DAR / far / faultvaddr).                                   */
#ifdef ARM32
# define DARWIN_EXC_STATE         ARM_EXCEPTION_STATE
# define DARWIN_EXC_STATE_COUNT   ARM_EXCEPTION_STATE_COUNT
# define DARWIN_EXC_STATE_T       arm_exception_state_t
# define DARWIN_EXC_STATE_DAR     THREAD_FLD_NAME(far)
#elif defined(AARCH64)
# define DARWIN_EXC_STATE         ARM_EXCEPTION_STATE64
# define DARWIN_EXC_STATE_COUNT   ARM_EXCEPTION_STATE64_COUNT
# define DARWIN_EXC_STATE_T       arm_exception_state64_t
# define DARWIN_EXC_STATE_DAR     THREAD_FLD_NAME(far)
#elif defined(POWERPC)
# if CPP_WORDSZ == 32
#   define DARWIN_EXC_STATE       PPC_EXCEPTION_STATE
#   define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE_COUNT
#   define DARWIN_EXC_STATE_T     ppc_exception_state_t
# else
#   define DARWIN_EXC_STATE       PPC_EXCEPTION_STATE64
#   define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE64_COUNT
#   define DARWIN_EXC_STATE_T     ppc_exception_state64_t
# endif
# define DARWIN_EXC_STATE_DAR     THREAD_FLD_NAME(dar)
#elif defined(I386) || defined(X86_64)
# if CPP_WORDSZ == 32
#   if defined(i386_EXCEPTION_STATE_COUNT) \
       && !defined(x86_EXCEPTION_STATE32_COUNT)
      /* Use old naming convention for 32-bit x86.  */
#     define DARWIN_EXC_STATE       i386_EXCEPTION_STATE
#     define DARWIN_EXC_STATE_COUNT i386_EXCEPTION_STATE_COUNT
#     define DARWIN_EXC_STATE_T     i386_exception_state_t
#   else
#     define DARWIN_EXC_STATE       x86_EXCEPTION_STATE32
#     define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE32_COUNT
#     define DARWIN_EXC_STATE_T     x86_exception_state32_t
#   endif
# else
#   define DARWIN_EXC_STATE       x86_EXCEPTION_STATE64
#   define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
#   define DARWIN_EXC_STATE_T     x86_exception_state64_t
# endif
# define DARWIN_EXC_STATE_DAR     THREAD_FLD_NAME(faultvaddr)
#elif !defined(CPPCHECK)
# error FIXME for non-arm/ppc/x86 darwin
#endif
/* This violates the namespace rules but there isn't anything that can */
/* be done about it.  The exception handling stuff is hard coded to    */
/* call this.  catch_exception_raise, catch_exception_raise_state and  */
/* catch_exception_raise_state_identity are called from OS.            */

/* Handle EXC_BAD_ACCESS raised on GC_ports.exception: for a           */
/* protection fault on an address inside the GC heap, unprotect the    */
/* containing page and record its blocks in GC_dirty_pages; anything   */
/* else is forwarded to the previously installed handler via FWD().    */
GC_API_OSCALL kern_return_t
catch_exception_raise(mach_port_t exception_port GC_ATTR_UNUSED,
                      mach_port_t thread, mach_port_t task GC_ATTR_UNUSED,
                      exception_type_t exception, exception_data_t code,
                      mach_msg_type_number_t code_count GC_ATTR_UNUSED)
{
  kern_return_t r;
  char *addr;
  thread_state_flavor_t flavor = DARWIN_EXC_STATE;
  mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
  DARWIN_EXC_STATE_T exc_state;

  if (exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
#   ifdef DEBUG_EXCEPTION_HANDLING
      /* We aren't interested, pass it on to the old handler */
      GC_log_printf("Exception: 0x%x Code: 0x%x 0x%x in catch...\n",
                    exception, code_count > 0 ? code[0] : -1,
                    code_count > 1 ? code[1] : -1);
#   endif
    return FWD();
  }

  r = thread_get_state(thread, flavor, (natural_t*)&exc_state,
                       &exc_state_count);
  if(r != KERN_SUCCESS) {
    /* The thread is supposed to be suspended while the exception      */
    /* handler is called.  This shouldn't fail.                        */
#   ifdef BROKEN_EXCEPTION_HANDLING
      GC_err_printf("thread_get_state failed in catch_exception_raise\n");
      return KERN_SUCCESS;
#   else
      ABORT("thread_get_state failed in catch_exception_raise");
#   endif
  }

  /* This is the address that caused the fault */
  addr = (char*) exc_state.DARWIN_EXC_STATE_DAR;
  if (!is_header_found_async(addr)) {
    /* Ugh... just like the SIGBUS problem above, it seems we get      */
    /* a bogus KERN_PROTECTION_FAILURE every once and a while.  We wait */
    /* till we get a bunch in a row before doing anything about it.    */
    /* If a "real" fault ever occurs it'll just keep faulting over and */
    /* over and we'll hit the limit pretty quickly.                    */
#   ifdef BROKEN_EXCEPTION_HANDLING
      static char *last_fault;
      static int last_fault_count;

      if(addr != last_fault) {
        last_fault = addr;
        last_fault_count = 0;
      }
      if(++last_fault_count < 32) {
        if(last_fault_count == 1)
          WARN("Ignoring KERN_PROTECTION_FAILURE at %p\n", addr);
        return KERN_SUCCESS;
      }

      GC_err_printf("Unexpected KERN_PROTECTION_FAILURE at %p; aborting...\n",
                    (void *)addr);
      /* Can't pass it along to the signal handler because that is      */
      /* ignoring SIGBUS signals.  We also shouldn't call ABORT here as */
      /* signals don't always work too well from the exception handler. */
      EXIT();
#   else /* BROKEN_EXCEPTION_HANDLING */
      /* Pass it along to the next exception handler
         (which should call SIGBUS/SIGSEGV) */
      return FWD();
#   endif /* !BROKEN_EXCEPTION_HANDLING */
  }

# ifdef BROKEN_EXCEPTION_HANDLING
    /* Reset the number of consecutive SIGBUSs */
    GC_sigbus_count = 0;
# endif

  if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
    struct hblk * h = (struct hblk*)((word)addr & ~(GC_page_size-1));
    size_t i;

    UNPROTECT(h, GC_page_size);
    /* Mark every heap block on the unprotected page as dirty.         */
    for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
      word index = PHT_HASH(h+i);
      async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
  } else if (GC_mprotect_state == GC_MP_DISCARDING) {
    /* Lie to the thread for now.  No sense UNPROTECT()ing the memory
       when we're just going to PROTECT() it again later.  The thread
       will just fault again once it resumes. */
  } else {
    /* Shouldn't happen, i don't think */
    GC_err_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
    return FWD();
  }
  return KERN_SUCCESS;
}
#undef FWD

#ifndef NO_DESC_CATCH_EXCEPTION_RAISE
  /* These symbols should have REFERENCED_DYNAMICALLY (0x10) bit set to */
  /* let strip know they are not to be stripped.                        */
  __asm__(".desc _catch_exception_raise, 0x10");
  __asm__(".desc _catch_exception_raise_state, 0x10");
  __asm__(".desc _catch_exception_raise_state_identity, 0x10");
#endif
#endif /* DARWIN && MPROTECT_VDB */
#ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
  /* Default stub for platforms without an mprotect-style dirty-bit    */
  /* implementation: the collector protects no pages.                  */
  GC_API int GC_CALL GC_incremental_protection_needs(void)
  {
    return GC_PROTECTS_NONE;
  }
#endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
  4070. #ifdef ECOS
  4071. /* Undo sbrk() redirection. */
  4072. # undef sbrk
  4073. #endif
  4074. /* If value is non-zero then allocate executable memory. */
  4075. GC_API void GC_CALL GC_set_pages_executable(int value)
  4076. {
  4077. GC_ASSERT(!GC_is_initialized);
  4078. /* Even if IGNORE_PAGES_EXECUTABLE is defined, GC_pages_executable is */
  4079. /* touched here to prevent a compiler warning. */
  4080. GC_pages_executable = (GC_bool)(value != 0);
  4081. }
/* Returns non-zero if the GC-allocated memory is executable.  */
/* GC_get_pages_executable is defined after all the places     */
/* where GC_get_pages_executable is undefined.                 */
GC_API int GC_CALL GC_get_pages_executable(void)
{
# ifdef IGNORE_PAGES_EXECUTABLE
    return 1;   /* Always allocate executable memory. */
# else
    return (int)GC_pages_executable;
# endif
}
/* Call stack save code for debugging.  Should probably be in          */
/* mach_dep.c, but that requires reorganization.                       */

/* I suspect the following works for most X86 *nix variants, so        */
/* long as the frame pointer is explicitly stored.  In the case of gcc, */
/* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
#if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
# include <features.h>

  /* Minimal view of an i386 stack frame: saved frame pointer, return  */
  /* address, then (optionally) the arguments.                         */
  struct frame {
    struct frame *fr_savfp;
    long fr_savpc;
#   if NARGS > 0
      long fr_arg[NARGS];       /* All the arguments go here.   */
#   endif
  };
#endif

#if defined(SPARC)
# if defined(LINUX)
#   include <features.h>

    /* SPARC register-window save area layout.                         */
    struct frame {
      long fr_local[8];
      long fr_arg[6];
      struct frame *fr_savfp;
      long fr_savpc;
#     ifndef __arch64__
        char *fr_stret;
#     endif
      long fr_argd[6];
      long fr_argx[0];
    };
# elif defined (DRSNX)
#   include <sys/sparc/frame.h>
# elif defined(OPENBSD)
#   include <frame.h>
# elif defined(FREEBSD) || defined(NETBSD)
#   include <machine/frame.h>
# else
#   include <sys/frame.h>
# endif
# if NARGS > 6
#   error We only know how to get the first 6 arguments
# endif
#endif /* SPARC */
  4135. #ifdef NEED_CALLINFO
  4136. /* Fill in the pc and argument information for up to NFRAMES of my */
  4137. /* callers. Ignore my frame and my callers frame. */
  4138. #ifdef LINUX
  4139. # include <unistd.h>
  4140. #endif
  4141. #endif /* NEED_CALLINFO */
  4142. #if defined(GC_HAVE_BUILTIN_BACKTRACE)
  4143. # ifdef _MSC_VER
  4144. # include "private/msvc_dbg.h"
  4145. # else
  4146. # include <execinfo.h>
  4147. # endif
  4148. #endif
#ifdef SAVE_CALL_CHAIN

#if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
    && defined(GC_HAVE_BUILTIN_BACKTRACE)

#ifdef REDIRECT_MALLOC
  /* Deal with possible malloc calls in backtrace by omitting  */
  /* the infinitely recursing backtrace.                       */
# ifdef THREADS
    __thread    /* If your compiler doesn't understand this           */
                /* you could use something like pthread_getspecific.  */
# endif
  GC_bool GC_in_save_callers = FALSE;
#endif

/* Record up to NFRAMES return addresses of the current call chain     */
/* into info[] using the compiler-provided backtrace(); unused slots   */
/* get a zero ci_pc.  Must be called with the allocation lock held.    */
GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
{
  void * tmp_info[NFRAMES + 1];
  int npcs, i;
# define IGNORE_FRAMES 1
  /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
  /* points to our own frame.                                         */
# ifdef REDIRECT_MALLOC
    if (GC_in_save_callers) {
      /* backtrace() allocated and re-entered us; record only our own  */
      /* pc to break the recursion.                                    */
      info[0].ci_pc = (word)(&GC_save_callers);
      for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
      return;
    }
    GC_in_save_callers = TRUE;
# endif

  GC_ASSERT(I_HOLD_LOCK());
                /* backtrace may call dl_iterate_phdr which is also   */
                /* used by GC_register_dynamic_libraries, and         */
                /* dl_iterate_phdr is not guaranteed to be reentrant. */

  /* The BCOPY below relies on callinfo and void* having the same size. */
  GC_STATIC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
  npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
  if (npcs > IGNORE_FRAMES)
    BCOPY(&tmp_info[IGNORE_FRAMES], info,
          (npcs - IGNORE_FRAMES) * sizeof(void *));
  for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
# ifdef REDIRECT_MALLOC
    GC_in_save_callers = FALSE;
# endif
}
#else /* No builtin backtrace; do it ourselves */

/* OpenBSD/NetBSD/FreeBSD SPARC use different struct frame field names. */
#if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
# define FR_SAVFP fr_fp
# define FR_SAVPC fr_pc
#else
# define FR_SAVFP fr_savfp
# define FR_SAVPC fr_savpc
#endif

/* The 64-bit SPARC ABI biases saved stack pointers by 2047.           */
#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
# define BIAS 2047
#else
# define BIAS 0
#endif

/* Walk the chain of saved frame pointers, recording up to NFRAMES     */
/* return pcs (and, if NARGS > 0, bit-complemented argument words)     */
/* into info[]; a zero ci_pc terminates a short chain.                 */
GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    frame = (struct frame *)GC_save_regs_in_stack();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
# endif

  /* Stop when the frame chain leaves the [frame, GC_stackbottom]      */
  /* range or NFRAMES entries have been recorded.                      */
  for (; !((word)fp HOTTER_THAN (word)frame)
         && !((word)GC_stackbottom HOTTER_THAN (word)fp)
         && nframes < NFRAMES;
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
#     if NARGS > 0
        int i;
#     endif

      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        for (i = 0; i < NARGS; i++) {
          info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}

#endif /* No builtin backtrace */
#endif /* SAVE_CALL_CHAIN */
  4234. #ifdef NEED_CALLINFO
  4235. /* Print info to stderr. We do NOT hold the allocation lock */
  4236. GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
  4237. {
  4238. int i;
  4239. static int reentry_count = 0;
  4240. GC_bool stop = FALSE;
  4241. DCL_LOCK_STATE;
  4242. /* FIXME: This should probably use a different lock, so that we */
  4243. /* become callable with or without the allocation lock. */
  4244. LOCK();
  4245. ++reentry_count;
  4246. UNLOCK();
  4247. # if NFRAMES == 1
  4248. GC_err_printf("\tCaller at allocation:\n");
  4249. # else
  4250. GC_err_printf("\tCall chain at allocation:\n");
  4251. # endif
  4252. for (i = 0; i < NFRAMES && !stop; i++) {
  4253. if (info[i].ci_pc == 0) break;
  4254. # if NARGS > 0
  4255. {
  4256. int j;
  4257. GC_err_printf("\t\targs: ");
  4258. for (j = 0; j < NARGS; j++) {
  4259. if (j != 0) GC_err_printf(", ");
  4260. GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
  4261. ~(info[i].ci_arg[j]));
  4262. }
  4263. GC_err_printf("\n");
  4264. }
  4265. # endif
  4266. if (reentry_count > 1) {
  4267. /* We were called during an allocation during */
  4268. /* a previous GC_print_callers call; punt. */
  4269. GC_err_printf("\t\t##PC##= 0x%lx\n",
  4270. (unsigned long)info[i].ci_pc);
  4271. continue;
  4272. }
  4273. {
  4274. char buf[40];
  4275. char *name;
  4276. # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
  4277. && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
  4278. char **sym_name =
  4279. backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
  4280. if (sym_name != NULL) {
  4281. name = sym_name[0];
  4282. } else
  4283. # endif
  4284. /* else */ {
  4285. (void)snprintf(buf, sizeof(buf), "##PC##= 0x%lx",
  4286. (unsigned long)info[i].ci_pc);
  4287. buf[sizeof(buf) - 1] = '\0';
  4288. name = buf;
  4289. }
  4290. # if defined(LINUX) && !defined(SMALL_CONFIG)
  4291. /* Try for a line number. */
  4292. {
  4293. FILE *pipe;
  4294. # define EXE_SZ 100
  4295. static char exe_name[EXE_SZ];
  4296. # define CMD_SZ 200
  4297. char cmd_buf[CMD_SZ];
  4298. # define RESULT_SZ 200
  4299. static char result_buf[RESULT_SZ];
  4300. size_t result_len;
  4301. char *old_preload;
  4302. # define PRELOAD_SZ 200
  4303. char preload_buf[PRELOAD_SZ];
  4304. static GC_bool found_exe_name = FALSE;
  4305. static GC_bool will_fail = FALSE;
  4306. int ret_code;
  4307. /* Try to get it via a hairy and expensive scheme. */
  4308. /* First we get the name of the executable: */
  4309. if (will_fail) goto out;
  4310. if (!found_exe_name) {
  4311. ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
  4312. if (ret_code < 0 || ret_code >= EXE_SZ
  4313. || exe_name[0] != '/') {
  4314. will_fail = TRUE; /* Don't try again. */
  4315. goto out;
  4316. }
  4317. exe_name[ret_code] = '\0';
  4318. found_exe_name = TRUE;
  4319. }
  4320. /* Then we use popen to start addr2line -e <exe> <addr> */
  4321. /* There are faster ways to do this, but hopefully this */
  4322. /* isn't time critical. */
  4323. (void)snprintf(cmd_buf, sizeof(cmd_buf),
  4324. "/usr/bin/addr2line -f -e %s 0x%lx",
  4325. exe_name, (unsigned long)info[i].ci_pc);
  4326. cmd_buf[sizeof(cmd_buf) - 1] = '\0';
  4327. old_preload = GETENV("LD_PRELOAD");
  4328. if (0 != old_preload) {
  4329. size_t old_len = strlen(old_preload);
  4330. if (old_len >= PRELOAD_SZ) {
  4331. will_fail = TRUE;
  4332. goto out;
  4333. }
  4334. BCOPY(old_preload, preload_buf, old_len + 1);
  4335. unsetenv ("LD_PRELOAD");
  4336. }
  4337. pipe = popen(cmd_buf, "r");
  4338. if (0 != old_preload
  4339. && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
  4340. WARN("Failed to reset LD_PRELOAD\n", 0);
  4341. }
  4342. if (pipe == NULL
  4343. || (result_len = fread(result_buf, 1,
  4344. RESULT_SZ - 1, pipe)) == 0) {
  4345. if (pipe != NULL) pclose(pipe);
  4346. will_fail = TRUE;
  4347. goto out;
  4348. }
  4349. if (result_buf[result_len - 1] == '\n') --result_len;
  4350. result_buf[result_len] = 0;
  4351. if (result_buf[0] == '?'
  4352. || (result_buf[result_len-2] == ':'
  4353. && result_buf[result_len-1] == '0')) {
  4354. pclose(pipe);
  4355. goto out;
  4356. }
  4357. /* Get rid of embedded newline, if any. Test for "main" */
  4358. {
  4359. char * nl = strchr(result_buf, '\n');
  4360. if (nl != NULL
  4361. && (word)nl < (word)(result_buf + result_len)) {
  4362. *nl = ':';
  4363. }
  4364. if (strncmp(result_buf, "main", nl - result_buf) == 0) {
  4365. stop = TRUE;
  4366. }
  4367. }
  4368. if (result_len < RESULT_SZ - 25) {
  4369. /* Add in hex address */
  4370. (void)snprintf(&result_buf[result_len],
  4371. sizeof(result_buf) - result_len,
  4372. " [0x%lx]", (unsigned long)info[i].ci_pc);
  4373. result_buf[sizeof(result_buf) - 1] = '\0';
  4374. }
  4375. name = result_buf;
  4376. pclose(pipe);
  4377. out:;
  4378. }
  4379. # endif /* LINUX */
  4380. GC_err_printf("\t\t%s\n", name);
  4381. # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
  4382. && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
  4383. if (sym_name != NULL)
  4384. free(sym_name); /* May call GC_[debug_]free; that's OK */
  4385. # endif
  4386. }
  4387. }
  4388. LOCK();
  4389. --reentry_count;
  4390. UNLOCK();
  4391. }
  4392. #endif /* NEED_CALLINFO */
  4393. #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
  4394. /* Dump /proc/self/maps to GC_stderr, to enable looking up names for */
  4395. /* addresses in FIND_LEAK output. */
  4396. void GC_print_address_map(void)
  4397. {
  4398. char *maps;
  4399. GC_err_printf("---------- Begin address map ----------\n");
  4400. maps = GC_get_maps();
  4401. GC_err_puts(maps != NULL ? maps : "Failed to get map!\n");
  4402. GC_err_printf("---------- End address map ----------\n");
  4403. }
  4404. #endif /* LINUX && ELF */