/*
 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
 * Copyright (c) 2000-2008 by Hewlett-Packard Development Company.
 * All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#if defined(GC_WIN32_THREADS)

#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN 1
#endif
#define NOSERVICE
#include <windows.h>

#ifdef THREAD_LOCAL_ALLOC
# include "private/thread_local_alloc.h"
#endif /* THREAD_LOCAL_ALLOC */

/* Allocation lock declarations. */
#if !defined(USE_PTHREAD_LOCKS)
  GC_INNER CRITICAL_SECTION GC_allocate_ml;
# ifdef GC_ASSERTIONS
    GC_INNER DWORD GC_lock_holder = NO_THREAD;
                /* Thread id for current holder of allocation lock */
# endif
#else
  GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
# ifdef GC_ASSERTIONS
    GC_INNER unsigned long GC_lock_holder = NO_THREAD;
# endif
#endif

#undef CreateThread
#undef ExitThread
#undef _beginthreadex
#undef _endthreadex

#ifdef GC_PTHREADS
# include <errno.h> /* for EAGAIN */

  /* Cygwin-specific forward decls */
# undef pthread_create
# undef pthread_join
# undef pthread_detach
# ifndef GC_NO_PTHREAD_SIGMASK
#   undef pthread_sigmask
# endif

  STATIC void * GC_pthread_start(void * arg);
  STATIC void GC_thread_exit_proc(void *arg);

# include <pthread.h>
# ifdef CAN_CALL_ATFORK
#   include <unistd.h>
# endif
#elif !defined(MSWINCE)
# include <process.h> /* For _beginthreadex, _endthreadex */
# include <errno.h> /* for errno, EAGAIN */
#endif /* !GC_PTHREADS && !MSWINCE */

/* PUSHED_REGS_COUNT is the number of copied registers in copy_ptr_regs. */
static ptr_t copy_ptr_regs(word *regs, const CONTEXT *pcontext);
#if defined(I386)
# ifdef WOW64_THREAD_CONTEXT_WORKAROUND
#   define PUSHED_REGS_COUNT 9
# else
#   define PUSHED_REGS_COUNT 7
# endif
#elif defined(X86_64) || defined(SHx)
# define PUSHED_REGS_COUNT 15
#elif defined(ARM32)
# define PUSHED_REGS_COUNT 13
#elif defined(AARCH64)
# define PUSHED_REGS_COUNT 30
#elif defined(MIPS) || defined(ALPHA)
# define PUSHED_REGS_COUNT 28
#elif defined(PPC)
# define PUSHED_REGS_COUNT 29
#endif

/* DllMain-based thread registration is currently incompatible */
/* with thread-local allocation, pthreads and WinCE. */
#if (defined(GC_DLL) || defined(GC_INSIDE_DLL)) \
    && !defined(GC_NO_THREADS_DISCOVERY) && !defined(MSWINCE) \
    && !defined(THREAD_LOCAL_ALLOC) && !defined(GC_PTHREADS)

  /* This code operates in two distinct modes, depending on */
  /* the setting of GC_win32_dll_threads. */
  /* If GC_win32_dll_threads is set, all threads in the process */
  /* are implicitly registered with the GC by DllMain. */
  /* No explicit registration is required, and attempts at */
  /* explicit registration are ignored. This mode is */
  /* very different from the Posix operation of the collector. */
  /* In this mode access to the thread table is lock-free. */
  /* Hence there is a static limit on the number of threads. */

# ifdef GC_DISCOVER_TASK_THREADS
    /* GC_DISCOVER_TASK_THREADS should be used if DllMain-based */
    /* thread registration is required but it is impossible to */
    /* call GC_use_threads_discovery before other GC routines. */
#   define GC_win32_dll_threads TRUE
# else
    STATIC GC_bool GC_win32_dll_threads = FALSE;
    /* GC_win32_dll_threads must be set (if needed) at the */
    /* application initialization time, i.e. before any */
    /* collector or thread calls. We make it a "dynamic" */
    /* option only to avoid multiple library versions. */
# endif

#else
  /* If GC_win32_dll_threads is FALSE (or the collector is */
  /* built without GC_DLL defined), things operate in a way */
  /* that is very similar to Posix platforms, and new threads */
  /* must be registered with the collector, e.g. by using */
  /* preprocessor-based interception of the thread primitives. */
  /* In this case, we use a real data structure for the thread */
  /* table. Note that there is no equivalent of linker-based */
  /* call interception, since we don't have ELF-like */
  /* facilities. The Windows analog appears to be "API */
  /* hooking", which really seems to be a standard way to */
  /* do minor binary rewriting (?). I'd prefer not to have */
  /* the basic collector rely on such facilities, but an */
  /* optional package that intercepts thread calls this way */
  /* would probably be nice. */
# ifndef GC_NO_THREADS_DISCOVERY
#   define GC_NO_THREADS_DISCOVERY
# endif
# define GC_win32_dll_threads FALSE
# undef MAX_THREADS
# define MAX_THREADS 1 /* dll_thread_table[] is always empty. */
#endif /* GC_NO_THREADS_DISCOVERY */
/* We have two versions of the thread table. Which one */
/* we use depends on whether or not GC_win32_dll_threads */
/* is set. Note that before initialization, we don't */
/* add any entries to either table, even if DllMain is */
/* called. The main thread will be added on */
/* initialization. */

/* The type of the first argument to InterlockedExchange. */
/* Documented to be LONG volatile *, but at least gcc likes */
/* this better. */
typedef LONG * IE_t;

STATIC GC_bool GC_thr_initialized = FALSE;

#ifndef GC_ALWAYS_MULTITHREADED
  GC_INNER GC_bool GC_need_to_lock = FALSE;
#endif

static GC_bool parallel_initialized = FALSE;

/* GC_use_threads_discovery() is currently incompatible with pthreads */
/* and WinCE. It might be possible to get DllMain-based thread */
/* registration to work with Cygwin, but if you try it then you are on */
/* your own. */
GC_API void GC_CALL GC_use_threads_discovery(void)
{
# ifdef GC_NO_THREADS_DISCOVERY
    ABORT("GC DllMain-based thread registration unsupported");
# else
    /* Turn on GC_win32_dll_threads. */
    GC_ASSERT(!parallel_initialized);
    /* Note that GC_use_threads_discovery is expected to be called by */
    /* the client application (not from DllMain) at start-up. */
#   ifndef GC_DISCOVER_TASK_THREADS
      GC_win32_dll_threads = TRUE;
#   endif
    GC_init_parallel();
# endif
}
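/*
 * Illustrative usage sketch (not part of this file's implementation):
 * a client that wants DllMain-based registration is expected to call
 * the public GC_use_threads_discovery() from its start-up code, before
 * GC_INIT() and before any other collector or thread call, roughly:
 *
 *   int main(void)
 *   {
 *     GC_use_threads_discovery();
 *     GC_INIT();
 *     ... create threads with plain CreateThread() as usual ...
 *   }
 */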
STATIC DWORD GC_main_thread = 0;

#define ADDR_LIMIT ((ptr_t)(word)-1)

struct GC_Thread_Rep {
  union {
#   ifndef GC_NO_THREADS_DISCOVERY
      volatile AO_t in_use;
                        /* Updated without lock. */
                        /* We assert that unused */
                        /* entries have invalid ids of */
                        /* zero and zero stack fields. */
                        /* Used only with GC_win32_dll_threads. */
#   endif
    struct GC_Thread_Rep * next;
                        /* Hash table link without */
                        /* GC_win32_dll_threads. */
                        /* More recently allocated threads */
                        /* with a given pthread id come */
                        /* first. (All but the first are */
                        /* guaranteed to be dead, but we may */
                        /* not yet have registered the join.) */
  } tm; /* table_management */

  DWORD id;

# ifdef MSWINCE
    /* According to MSDN specs for WinCE targets: */
    /* - DuplicateHandle() is not applicable to thread handles; and */
    /* - the value returned by GetCurrentThreadId() could be used as */
    /* a "real" thread handle (for SuspendThread(), ResumeThread() and */
    /* GetThreadContext()). */
#   define THREAD_HANDLE(t) (HANDLE)(word)(t)->id
# else
    HANDLE handle;
#   define THREAD_HANDLE(t) (t)->handle
# endif

# ifdef WOW64_THREAD_CONTEXT_WORKAROUND
    PNT_TIB tib;
# endif

  ptr_t stack_base;     /* The cold end of the stack. */
                        /* 0 ==> entry not valid. */
                        /* !in_use ==> stack_base == 0 */
  ptr_t last_stack_min; /* Last known minimum (hottest) address */
                        /* in stack or ADDR_LIMIT if unset */
# ifdef IA64
    ptr_t backing_store_end;
    ptr_t backing_store_ptr;
# endif

  ptr_t thread_blocked_sp;      /* Protected by GC lock. */
                        /* NULL value means thread unblocked. */
                        /* If set to non-NULL, thread will */
                        /* acquire GC lock before doing any */
                        /* pointer manipulations. Thus it does */
                        /* not need to stop this thread. */

  struct GC_traced_stack_sect_s *traced_stack_sect;
                        /* Points to the "stack section" data */
                        /* held in stack by the innermost */
                        /* GC_call_with_gc_active() of this */
                        /* thread. May be NULL. */

  unsigned short finalizer_skipped;
  unsigned char finalizer_nested;
                        /* Used by GC_check_finalizer_nested() */
                        /* to minimize the level of recursion */
                        /* when a client finalizer allocates */
                        /* memory (initially both are 0). */

  unsigned char suspended; /* really of GC_bool type */

# ifdef GC_PTHREADS
    unsigned char flags;        /* Protected by GC lock. */
#   define FINISHED 1           /* Thread has exited. */
#   define DETACHED 2           /* Thread is intended to be detached. */
#   define KNOWN_FINISHED(t) (((t) -> flags) & FINISHED)
    pthread_t pthread_id;
    void *status; /* hold exit value until join in case it's a pointer */
# else
#   define KNOWN_FINISHED(t) 0
# endif

# ifdef THREAD_LOCAL_ALLOC
    struct thread_local_freelists tlfs;
# endif

# ifdef RETRY_GET_THREAD_CONTEXT
    ptr_t context_sp;
    word context_regs[PUSHED_REGS_COUNT];
                        /* Populated as part of GC_suspend() as */
                        /* resume/suspend loop may be needed for the */
                        /* call to GetThreadContext() to succeed. */
# endif
};

typedef struct GC_Thread_Rep * GC_thread;
typedef volatile struct GC_Thread_Rep * GC_vthread;

#ifndef GC_NO_THREADS_DISCOVERY
  /* We assumed that volatile ==> memory ordering, at least among */
  /* volatiles. This code should consistently use atomic_ops. */
  STATIC volatile GC_bool GC_please_stop = FALSE;
#elif defined(GC_ASSERTIONS)
  STATIC GC_bool GC_please_stop = FALSE;
#endif
/*
 * We track thread attachments while the world is supposed to be stopped.
 * Unfortunately, we can't stop them from starting, since blocking in
 * DllMain seems to cause the world to deadlock. Thus we have to recover
 * if we notice this in the middle of marking.
 */
#ifndef GC_NO_THREADS_DISCOVERY
  STATIC volatile AO_t GC_attached_thread = FALSE;
#endif

#if defined(WRAP_MARK_SOME) && !defined(GC_PTHREADS)
  /* Return TRUE if a thread was attached since we last asked or */
  /* since GC_attached_thread was explicitly reset. */
  GC_INNER GC_bool GC_started_thread_while_stopped(void)
  {
#   ifndef GC_NO_THREADS_DISCOVERY
      if (GC_win32_dll_threads) {
#       ifdef AO_HAVE_compare_and_swap_release
          if (AO_compare_and_swap_release(&GC_attached_thread, TRUE,
                                          FALSE /* stored */))
            return TRUE;
#       else
          AO_nop_full(); /* Prior heap reads need to complete earlier. */
          if (AO_load(&GC_attached_thread)) {
            AO_store(&GC_attached_thread, FALSE);
            return TRUE;
          }
#       endif
      }
#   endif
    return FALSE;
  }
#endif /* WRAP_MARK_SOME */
/* Thread table used if GC_win32_dll_threads is set. */
/* This is a fixed size array. */
/* Since we use runtime conditionals, both versions */
/* are always defined. */
# ifndef MAX_THREADS
#   define MAX_THREADS 512
# endif

/* Things may get quite slow for large numbers of threads, */
/* since we look them up with sequential search. */
volatile struct GC_Thread_Rep dll_thread_table[MAX_THREADS];

STATIC volatile LONG GC_max_thread_index = 0;
                        /* Largest index in dll_thread_table */
                        /* that was ever used. */

/* And now the version used if GC_win32_dll_threads is not set. */
/* This is a chained hash table, with much of the code borrowed */
/* from the Posix implementation. */
#ifndef THREAD_TABLE_SZ
# define THREAD_TABLE_SZ 256 /* Power of 2 (for speed). */
#endif
#define THREAD_TABLE_INDEX(id) /* id is of DWORD type */ \
                (int)((((id) >> 8) ^ (id)) % THREAD_TABLE_SZ)
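/* A worked example of the bucket computation above (illustrative only): */
/* for a thread id of 0x1A2C, (0x1A2C >> 8) ^ 0x1A2C = 0x1A ^ 0x1A2C */
/* = 0x1A36, and 0x1A36 % 256 = 0x36 = 54. Since THREAD_TABLE_SZ is a */
/* power of two, the modulus just keeps the low byte, so the index */
/* folds the second byte of the id into the first. */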
STATIC GC_thread GC_threads[THREAD_TABLE_SZ];

/* It may not be safe to allocate when we register the first thread. */
/* Thus we allocate one statically. It does not contain any pointer */
/* field we need to push ("next" and "status" fields are unused). */
static struct GC_Thread_Rep first_thread;
static GC_bool first_thread_used = FALSE;

/* Add a thread to GC_threads. We assume it wasn't already there. */
/* Caller holds allocation lock. */
/* Unlike the pthreads version, the id field is set by the caller. */
STATIC GC_thread GC_new_thread(DWORD id)
{
  int hv = THREAD_TABLE_INDEX(id);
  GC_thread result;

# ifdef DEBUG_THREADS
    GC_log_printf("Creating thread 0x%lx\n", (long)id);
    if (GC_threads[hv] != NULL)
      GC_log_printf("Hash collision at GC_threads[%d]\n", hv);
# endif
  GC_ASSERT(I_HOLD_LOCK());
  if (!EXPECT(first_thread_used, TRUE)) {
    result = &first_thread;
    first_thread_used = TRUE;
    GC_ASSERT(NULL == GC_threads[hv]);
  } else {
    GC_ASSERT(!GC_win32_dll_threads);
    result = (struct GC_Thread_Rep *)
                GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    if (result == 0) return(0);
  }
  /* result -> id = id; Done by caller. */
  result -> tm.next = GC_threads[hv];
  GC_threads[hv] = result;
# ifdef GC_PTHREADS
    GC_ASSERT(result -> flags == 0);
# endif
  GC_ASSERT(result -> thread_blocked_sp == NULL);
  if (EXPECT(result != &first_thread, TRUE))
    GC_dirty(result);
  return(result);
}
STATIC GC_bool GC_in_thread_creation = FALSE;
                                /* Protected by allocation lock. */

GC_INLINE void GC_record_stack_base(GC_vthread me,
                                    const struct GC_stack_base *sb)
{
  me -> stack_base = (ptr_t)sb->mem_base;
# ifdef IA64
    me -> backing_store_end = (ptr_t)sb->reg_base;
# endif
  if (me -> stack_base == NULL)
    ABORT("Bad stack base in GC_register_my_thread");
}

/* This may be called from DllMain, and hence operates under unusual */
/* constraints. In particular, it must be lock-free if */
/* GC_win32_dll_threads is set. Always called from the thread being */
/* added. If GC_win32_dll_threads is not set, we already hold the */
/* allocation lock except possibly during single-threaded startup code. */
STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
                                             DWORD thread_id)
{
  GC_vthread me;

  /* The following should be a no-op according to the win32 */
  /* documentation. There is empirical evidence that it */
  /* isn't. - HB */
# if defined(MPROTECT_VDB)
    if (GC_incremental
#     ifdef GWW_VDB
        && !GC_gww_dirty_init()
#     endif
        )
      GC_set_write_fault_handler();
# endif

# ifndef GC_NO_THREADS_DISCOVERY
    if (GC_win32_dll_threads) {
      int i;
      /* It appears to be unsafe to acquire a lock here, since this */
      /* code is apparently not preemptible on some systems. */
      /* (This is based on complaints, not on Microsoft's official */
      /* documentation, which says this should perform "only simple */
      /* initialization tasks".) */
      /* Hence we make do with nonblocking synchronization. */
      /* It has been claimed that DllMain is really only executed with */
      /* a particular system lock held, and thus careful use of locking */
      /* around code that doesn't call back into the system libraries */
      /* might be OK. But this hasn't been tested across all win32 */
      /* variants. */

      /* cast away volatile qualifier */
      for (i = 0;
           InterlockedExchange((void*)&dll_thread_table[i].tm.in_use, 1) != 0;
           i++) {
        /* Compare-and-swap would make this cleaner, but that's not */
        /* supported before Windows 98 and NT 4.0. In Windows 2000, */
        /* InterlockedExchange is supposed to be replaced by */
        /* InterlockedExchangePointer, but that's not really what I */
        /* want here. */
        /* FIXME: We should eventually declare Win95 dead and use AO_ */
        /* primitives here. */
        if (i == MAX_THREADS - 1)
          ABORT("Too many threads");
      }
      /* Update GC_max_thread_index if necessary. The following is */
      /* safe, and unlike CompareExchange-based solutions seems to work */
      /* on all Windows95 and later platforms. */
      /* Unfortunately, GC_max_thread_index may be temporarily out of */
      /* bounds, so readers have to compensate. */
      while (i > GC_max_thread_index) {
        InterlockedIncrement((IE_t)&GC_max_thread_index);
      }
      if (GC_max_thread_index >= MAX_THREADS) {
        /* We overshot due to simultaneous increments. */
        /* Setting it to MAX_THREADS-1 is always safe. */
        GC_max_thread_index = MAX_THREADS - 1;
      }
      me = dll_thread_table + i;
    } else
# endif
  /* else */ /* Not using DllMain */ {
    GC_ASSERT(I_HOLD_LOCK());
    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(thread_id);
    GC_in_thread_creation = FALSE;
    if (me == 0)
      ABORT("Failed to allocate memory for thread registering");
  }
# ifdef GC_PTHREADS
    /* me can be NULL -> segfault */
    me -> pthread_id = pthread_self();
# endif
# ifndef MSWINCE
    /* GetCurrentThread() returns a pseudohandle (a const value). */
    if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                         GetCurrentProcess(),
                         (HANDLE*)&(me -> handle),
                         0 /* dwDesiredAccess */, FALSE /* bInheritHandle */,
                         DUPLICATE_SAME_ACCESS)) {
      ABORT_ARG1("DuplicateHandle failed",
                 ": errcode= 0x%X", (unsigned)GetLastError());
    }
# endif
# ifdef WOW64_THREAD_CONTEXT_WORKAROUND
    me -> tib = (PNT_TIB)NtCurrentTeb();
# endif
  me -> last_stack_min = ADDR_LIMIT;
  GC_record_stack_base(me, sb);
  /* Up until this point, GC_push_all_stacks considers this thread */
  /* invalid. */
  /* Up until this point, this entry is viewed as reserved but invalid */
  /* by GC_delete_thread. */
  me -> id = thread_id;
# if defined(THREAD_LOCAL_ALLOC)
    GC_init_thread_local((GC_tlfs)(&(me->tlfs)));
# endif
# ifndef GC_NO_THREADS_DISCOVERY
    if (GC_win32_dll_threads) {
      if (GC_please_stop) {
        AO_store(&GC_attached_thread, TRUE);
        AO_nop_full(); /* Later updates must become visible after this. */
      }
      /* We'd like to wait here, but can't, since waiting in DllMain */
      /* provokes deadlocks. */
      /* Thus we force marking to be restarted instead. */
    } else
# endif
  /* else */ {
    GC_ASSERT(!GC_please_stop);
    /* Otherwise both we and the thread stopping code would be */
    /* holding the allocation lock. */
  }
  return (GC_thread)(me);
}
/*
 * GC_max_thread_index may temporarily be larger than MAX_THREADS.
 * To avoid subscript errors, we check on access.
 */
GC_INLINE LONG GC_get_max_thread_index(void)
{
  LONG my_max = GC_max_thread_index;
  if (my_max >= MAX_THREADS) return MAX_THREADS - 1;
  return my_max;
}

/* Return the GC_thread corresponding to a thread id. May be called */
/* without a lock, but should be called in contexts in which the */
/* requested thread cannot be asynchronously deleted, e.g. from the */
/* thread itself. */
/* This version assumes that either GC_win32_dll_threads is set, or */
/* we hold the allocator lock. */
/* Also used (for assertion checking only) from thread_local_alloc.c. */
STATIC GC_thread GC_lookup_thread_inner(DWORD thread_id)
{
# ifndef GC_NO_THREADS_DISCOVERY
    if (GC_win32_dll_threads) {
      int i;
      LONG my_max = GC_get_max_thread_index();

      for (i = 0; i <= my_max &&
                  (!AO_load_acquire(&dll_thread_table[i].tm.in_use)
                   || dll_thread_table[i].id != thread_id);
                  /* Must still be in_use, since nobody else can store our */
                  /* thread_id. */
           i++) {
        /* empty */
      }
      return i <= my_max ? (GC_thread)(dll_thread_table + i) : NULL;
    } else
# endif
  /* else */ {
    GC_thread p = GC_threads[THREAD_TABLE_INDEX(thread_id)];

    GC_ASSERT(I_HOLD_LOCK());
    while (p != 0 && p -> id != thread_id) p = p -> tm.next;
    return(p);
  }
}

#ifdef LINT2
# define CHECK_LOOKUP_MY_THREAD(me) \
        if (!(me)) ABORT("GC_lookup_thread_inner(GetCurrentThreadId) failed")
#else
# define CHECK_LOOKUP_MY_THREAD(me) /* empty */
#endif
/* Called by GC_finalize() (in case of an allocation failure observed). */
/* GC_reset_finalizer_nested() is the same as in pthread_support.c. */
GC_INNER void GC_reset_finalizer_nested(void)
{
  GC_thread me = GC_lookup_thread_inner(GetCurrentThreadId());
  CHECK_LOOKUP_MY_THREAD(me);
  me->finalizer_nested = 0;
}

/* Checks and updates the thread-local level of finalizers recursion. */
/* Returns NULL if GC_invoke_finalizers() should not be called by the */
/* collector (to minimize the risk of a deep finalizers recursion), */
/* otherwise returns a pointer to the thread-local finalizer_nested. */
/* Called by GC_notify_or_invoke_finalizers() only (the lock is held). */
/* GC_check_finalizer_nested() is the same as in pthread_support.c. */
GC_INNER unsigned char *GC_check_finalizer_nested(void)
{
  GC_thread me = GC_lookup_thread_inner(GetCurrentThreadId());
  unsigned nesting_level;

  CHECK_LOOKUP_MY_THREAD(me);
  nesting_level = me->finalizer_nested;
  if (nesting_level) {
    /* We are inside another GC_invoke_finalizers(). */
    /* Skip some implicitly-called GC_invoke_finalizers() */
    /* depending on the nesting (recursion) level. */
    if (++me->finalizer_skipped < (1U << nesting_level)) return NULL;
    me->finalizer_skipped = 0;
  }
  me->finalizer_nested = (unsigned char)(nesting_level + 1);
  return &me->finalizer_nested;
}
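/* A worked example of the throttling above (illustrative only): with */
/* finalizer_nested == 1, only every 2nd implicit GC_invoke_finalizers() */
/* opportunity proceeds (1U << 1); with finalizer_nested == 3, only */
/* every 8th does. Each invocation that does proceed deepens the level */
/* by one, so the fraction of invocations allowed shrinks exponentially */
/* with the recursion depth of client finalizers that allocate. */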
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  /* This is called from thread-local GC_malloc(). */
  GC_bool GC_is_thread_tsd_valid(void *tsd)
  {
    GC_thread me;
    DCL_LOCK_STATE;

    LOCK();
    me = GC_lookup_thread_inner(GetCurrentThreadId());
    UNLOCK();
    return (word)tsd >= (word)(&me->tlfs)
            && (word)tsd < (word)(&me->tlfs) + sizeof(me->tlfs);
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */

GC_API int GC_CALL GC_thread_is_registered(void)
{
  DWORD thread_id = GetCurrentThreadId();
  GC_thread me;
  DCL_LOCK_STATE;

  LOCK();
  me = GC_lookup_thread_inner(thread_id);
  UNLOCK();
  return me != NULL;
}

GC_API void GC_CALL GC_register_altstack(void *stack GC_ATTR_UNUSED,
                                         GC_word stack_size GC_ATTR_UNUSED,
                                         void *altstack GC_ATTR_UNUSED,
                                         GC_word altstack_size GC_ATTR_UNUSED)
{
  /* TODO: Implement */
}

/* Make sure thread descriptor t is not protected by the VDB */
/* implementation. */
/* Used to prevent write faults when the world is (partially) stopped, */
/* since it may have been stopped with a system lock held, and that */
/* lock may be required for fault handling. */
#if defined(MPROTECT_VDB)
# define UNPROTECT_THREAD(t) \
    if (!GC_win32_dll_threads && GC_incremental && t != &first_thread) { \
      GC_ASSERT(SMALL_OBJ(GC_size(t))); \
      GC_remove_protection(HBLKPTR(t), 1, FALSE); \
    } else (void)0
#else
# define UNPROTECT_THREAD(t) (void)0
#endif

#ifdef CYGWIN32
# define GC_PTHREAD_PTRVAL(pthread_id) pthread_id
#elif defined(GC_WIN32_PTHREADS) || defined(GC_PTHREADS_PARAMARK)
# include <pthread.h> /* to check for winpthreads */
# if defined(__WINPTHREADS_VERSION_MAJOR)
#   define GC_PTHREAD_PTRVAL(pthread_id) pthread_id
# else
#   define GC_PTHREAD_PTRVAL(pthread_id) pthread_id.p
# endif
#endif

/* If a thread has been joined, but we have not yet */
/* been notified, then there may be more than one thread */
/* in the table with the same win32 id. */
/* This is OK, but we need a way to delete a specific one. */
/* Assumes we hold the allocation lock unless */
/* GC_win32_dll_threads is set. Does not actually free */
/* GC_thread entry (only unlinks it). */
/* If GC_win32_dll_threads is set it should be called from the */
/* thread being deleted. */
STATIC void GC_delete_gc_thread_no_free(GC_vthread t)
{
# ifndef MSWINCE
    CloseHandle(t->handle);
# endif
# ifndef GC_NO_THREADS_DISCOVERY
    if (GC_win32_dll_threads) {
      /* This is intended to be lock-free. */
      /* It is either called synchronously from the thread being */
      /* deleted, or by the joining thread. */
      /* In this branch asynchronous changes to (*t) are possible. */
      /* It's not allowed to call GC_printf (and the friends) here, */
      /* see GC_stop_world() for the information. */
      t -> stack_base = 0;
      t -> id = 0;
#     ifdef RETRY_GET_THREAD_CONTEXT
        t -> context_sp = NULL;
#     endif
      AO_store_release(&t->tm.in_use, FALSE);
    } else
# endif
  /* else */ {
    DWORD id = ((GC_thread)t) -> id;
                /* Cast away volatile qualifier, since we have lock. */
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread p = GC_threads[hv];
    GC_thread prev = NULL;

    GC_ASSERT(I_HOLD_LOCK());
    while (p != (GC_thread)t) {
      prev = p;
      p = p -> tm.next;
    }
    if (prev == 0) {
      GC_threads[hv] = p -> tm.next;
    } else {
      GC_ASSERT(prev != &first_thread);
      prev -> tm.next = p -> tm.next;
      GC_dirty(prev);
    }
  }
}

/* Delete a thread from GC_threads. We assume it is there. */
/* (The code intentionally traps if it wasn't.) Assumes we */
/* hold the allocation lock unless GC_win32_dll_threads is set. */
/* If GC_win32_dll_threads is set then it should be called from */
/* the thread being deleted. It is also safe to delete the */
/* main thread (unless GC_win32_dll_threads). */
STATIC void GC_delete_thread(DWORD id)
{
  if (GC_win32_dll_threads) {
    GC_vthread t = GC_lookup_thread_inner(id);

    if (0 == t) {
      WARN("Removing nonexistent thread, id = %" WARN_PRIdPTR "\n", id);
    } else {
      GC_delete_gc_thread_no_free(t);
    }
  } else {
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread p = GC_threads[hv];
    GC_thread prev = NULL;

    GC_ASSERT(I_HOLD_LOCK());
    while (p -> id != id) {
      prev = p;
      p = p -> tm.next;
    }
#   ifndef MSWINCE
      CloseHandle(p->handle);
#   endif
    if (prev == 0) {
      GC_threads[hv] = p -> tm.next;
    } else {
      GC_ASSERT(prev != &first_thread);
      prev -> tm.next = p -> tm.next;
      GC_dirty(prev);
    }
    if (EXPECT(p != &first_thread, TRUE)) {
      GC_INTERNAL_FREE(p);
    }
  }
}

GC_API void GC_CALL GC_allow_register_threads(void)
{
  /* Check GC is initialized and the current thread is registered. */
  GC_ASSERT(GC_lookup_thread_inner(GetCurrentThreadId()) != 0);
# if !defined(GC_ALWAYS_MULTITHREADED) && !defined(PARALLEL_MARK) \
     && !defined(GC_NO_THREADS_DISCOVERY)
    /* GC_init() does not call GC_init_parallel() in this case. */
    parallel_initialized = TRUE;
# endif
  set_need_to_lock();
}

GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
{
  GC_thread me;
  DWORD thread_id = GetCurrentThreadId();
  DCL_LOCK_STATE;

  if (GC_need_to_lock == FALSE)
    ABORT("Threads explicit registering is not previously enabled");

  /* We lock here, since we want to wait for an ongoing GC. */
  LOCK();
  me = GC_lookup_thread_inner(thread_id);
  if (me == 0) {
#   ifdef GC_PTHREADS
      me = GC_register_my_thread_inner(sb, thread_id);
      me -> flags |= DETACHED;
          /* Treat as detached, since we do not need to worry about */
          /* pointer results. */
#   else
      GC_register_my_thread_inner(sb, thread_id);
#   endif
    UNLOCK();
    return GC_SUCCESS;
  } else
#   ifdef GC_PTHREADS
      /* else */ if ((me -> flags & FINISHED) != 0) {
        GC_record_stack_base(me, sb);
        me -> flags &= ~FINISHED; /* but not DETACHED */
#       ifdef THREAD_LOCAL_ALLOC
          GC_init_thread_local((GC_tlfs)(&me->tlfs));
#       endif
        UNLOCK();
        return GC_SUCCESS;
      } else
#   endif
  /* else */ {
    UNLOCK();
    return GC_DUPLICATE;
  }
}
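/*
 * Illustrative usage sketch (not part of this file's implementation):
 * a thread created directly with CreateThread() (i.e. not through the
 * GC-provided wrappers) can register itself with the public API,
 * provided an already-registered thread has called
 * GC_allow_register_threads() first:
 *
 *   struct GC_stack_base sb;
 *   GC_get_stack_base(&sb);
 *   GC_register_my_thread(&sb);   returns GC_DUPLICATE if already done
 *   ... run client code, allocate from the GC heap ...
 *   GC_unregister_my_thread();    before the thread exits
 */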
/* Similar to that in pthread_support.c. */
STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
  GC_ASSERT(I_HOLD_LOCK());
  if (GC_incremental && GC_collection_in_progress()) {
    word old_gc_no = GC_gc_no;

    /* Make sure that no part of our stack is still on the mark stack, */
    /* since it's about to be unmapped. */
    do {
      ENTER_GC();
      GC_in_thread_creation = TRUE;
      GC_collect_a_little_inner(1);
      GC_in_thread_creation = FALSE;
      EXIT_GC();

      UNLOCK();
      Sleep(0); /* yield */
      LOCK();
    } while (GC_incremental && GC_collection_in_progress()
             && (wait_for_all || old_gc_no == GC_gc_no));
  }
}

GC_API int GC_CALL GC_unregister_my_thread(void)
{
  DCL_LOCK_STATE;

# ifdef DEBUG_THREADS
    GC_log_printf("Unregistering thread 0x%lx\n", (long)GetCurrentThreadId());
# endif

  if (GC_win32_dll_threads) {
#   if defined(THREAD_LOCAL_ALLOC)
      /* Can't happen: see GC_use_threads_discovery(). */
      GC_ASSERT(FALSE);
#   else
      /* FIXME: Should we just ignore this? */
      GC_delete_thread(GetCurrentThreadId());
#   endif
  } else {
#   if defined(THREAD_LOCAL_ALLOC) || defined(GC_PTHREADS)
      GC_thread me;
#   endif
    DWORD thread_id = GetCurrentThreadId();

    LOCK();
    GC_wait_for_gc_completion(FALSE);
#   if defined(THREAD_LOCAL_ALLOC) || defined(GC_PTHREADS)
      me = GC_lookup_thread_inner(thread_id);
      CHECK_LOOKUP_MY_THREAD(me);
      GC_ASSERT(!KNOWN_FINISHED(me));
#   endif
#   if defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(GC_getspecific(GC_thread_key) == &me->tlfs);
      GC_destroy_thread_local(&(me->tlfs));
#   endif
#   ifdef GC_PTHREADS
      if ((me -> flags & DETACHED) == 0) {
        me -> flags |= FINISHED;
      } else
#   endif
    /* else */ {
      GC_delete_thread(thread_id);
    }
#   if defined(THREAD_LOCAL_ALLOC)
      /* It is required to call remove_specific defined in specific.c. */
      GC_remove_specific(GC_thread_key);
#   endif
    UNLOCK();
  }
  return GC_SUCCESS;
}
/* Wrapper for functions that are likely to block for an appreciable */
/* length of time. */
/* GC_do_blocking_inner() is nearly the same as in pthread_support.c */
GC_INNER void GC_do_blocking_inner(ptr_t data, void * context GC_ATTR_UNUSED)
{
  struct blocking_data * d = (struct blocking_data *) data;
  DWORD thread_id = GetCurrentThreadId();
  GC_thread me;
# ifdef IA64
    ptr_t stack_ptr = GC_save_regs_in_stack();
# endif
  DCL_LOCK_STATE;

  LOCK();
  me = GC_lookup_thread_inner(thread_id);
  CHECK_LOOKUP_MY_THREAD(me);
  GC_ASSERT(me -> thread_blocked_sp == NULL);
# ifdef IA64
    me -> backing_store_ptr = stack_ptr;
# endif
  me -> thread_blocked_sp = (ptr_t) &d; /* save approx. sp */
  /* Save context here if we want to support precise stack marking */
  UNLOCK();
  d -> client_data = (d -> fn)(d -> client_data);
  LOCK(); /* This will block if the world is stopped. */
# if defined(CPPCHECK)
    GC_noop1((word)me->thread_blocked_sp);
# endif
  me -> thread_blocked_sp = NULL;
  UNLOCK();
}
/* GC_call_with_gc_active() has the opposite to GC_do_blocking() */
/* functionality. It might be called from a user function invoked by */
/* GC_do_blocking() to temporarily allow calling any GC function and/or */
/* manipulating pointers to the garbage collected heap again. */
GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
                                             void * client_data)
{
  struct GC_traced_stack_sect_s stacksect;
  DWORD thread_id = GetCurrentThreadId();
  GC_thread me;
  DCL_LOCK_STATE;

  LOCK(); /* This will block if the world is stopped. */
  me = GC_lookup_thread_inner(thread_id);
  CHECK_LOOKUP_MY_THREAD(me);

  /* Adjust our stack base value (this could happen unless */
  /* GC_get_stack_base() was used which returned GC_SUCCESS). */
  GC_ASSERT(me -> stack_base != NULL);
  if ((word)me->stack_base < (word)(&stacksect))
    me -> stack_base = (ptr_t)(&stacksect);

  if (me -> thread_blocked_sp == NULL) {
    /* We are not inside GC_do_blocking() - do nothing more. */
    UNLOCK();
    client_data = fn(client_data);
    /* Prevent treating the above as a tail call. */
    GC_noop1((word)(&stacksect));
    return client_data; /* result */
  }

  /* Setup new "stack section". */
  stacksect.saved_stack_ptr = me -> thread_blocked_sp;
# ifdef IA64
    /* This is the same as in GC_call_with_stack_base(). */
    stacksect.backing_store_end = GC_save_regs_in_stack();
    /* Unnecessarily flushes register stack, */
    /* but that probably doesn't hurt. */
    stacksect.saved_backing_store_ptr = me -> backing_store_ptr;
# endif
  stacksect.prev = me -> traced_stack_sect;
  me -> thread_blocked_sp = NULL;
  me -> traced_stack_sect = &stacksect;

  UNLOCK();
  client_data = fn(client_data);
  GC_ASSERT(me -> thread_blocked_sp == NULL);
  GC_ASSERT(me -> traced_stack_sect == &stacksect);

  /* Restore original "stack section". */
  LOCK();
# if defined(CPPCHECK)
    GC_noop1((word)me->traced_stack_sect);
# endif
  me -> traced_stack_sect = stacksect.prev;
# ifdef IA64
    me -> backing_store_ptr = stacksect.saved_backing_store_ptr;
# endif
  me -> thread_blocked_sp = stacksect.saved_stack_ptr;
  UNLOCK();

  return client_data; /* result */
}
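/*
 * Illustrative usage sketch (not part of this file's implementation):
 * a registered thread wraps a long blocking call in GC_do_blocking() so
 * that the collector need not suspend it, and uses
 * GC_call_with_gc_active() from within the blocked region if it must
 * touch the GC heap again:
 *
 *   static void *do_wait(void *arg)
 *   {
 *     ... blocking system call; no GC allocation and no manipulation
 *         of pointers into the GC heap here ...
 *     return NULL;
 *   }
 *   ...
 *   GC_do_blocking(do_wait, NULL);
 *
 * A callback executed inside do_wait() that does need to allocate
 * should be invoked as GC_call_with_gc_active(callback, data).
 */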
#ifdef GC_PTHREADS
  /* A quick-and-dirty cache of the mapping between pthread_t */
  /* and win32 thread id. */
# define PTHREAD_MAP_SIZE 512
  DWORD GC_pthread_map_cache[PTHREAD_MAP_SIZE] = {0};
# define PTHREAD_MAP_INDEX(pthread_id) \
                ((NUMERIC_THREAD_ID(pthread_id) >> 5) % PTHREAD_MAP_SIZE)
        /* It appears pthread_t is really a pointer type ... */
# define SET_PTHREAD_MAP_CACHE(pthread_id, win32_id) \
      (void)(GC_pthread_map_cache[PTHREAD_MAP_INDEX(pthread_id)] = (win32_id))
# define GET_PTHREAD_MAP_CACHE(pthread_id) \
          GC_pthread_map_cache[PTHREAD_MAP_INDEX(pthread_id)]

  /* Return a GC_thread corresponding to a given pthread_t. */
  /* Returns 0 if it's not there. */
  /* We assume that this is only called for pthread ids that */
  /* have not yet terminated or are still joinable, and */
  /* cannot be concurrently terminated. */
  /* Assumes we do NOT hold the allocation lock. */
  STATIC GC_thread GC_lookup_pthread(pthread_t id)
  {
#   ifndef GC_NO_THREADS_DISCOVERY
      if (GC_win32_dll_threads) {
        int i;
        LONG my_max = GC_get_max_thread_index();

        /* Keep scanning until we find the in-use entry whose pthread id */
        /* matches (the condition in the original text lacked the "!"). */
        for (i = 0; i <= my_max &&
                    (!AO_load_acquire(&dll_thread_table[i].tm.in_use)
                     || !THREAD_EQUAL(dll_thread_table[i].pthread_id, id));
                    /* Must still be in_use, since nobody else can */
                    /* store our thread_id. */
             i++) {
          /* empty */
        }
        return i <= my_max ? (GC_thread)(dll_thread_table + i) : NULL;
      } else
#   endif
    /* else */ {
      /* We first try the cache. If that fails, we use a very slow */
      /* approach. */
      DWORD win32_id = GET_PTHREAD_MAP_CACHE(id);
      int hv_guess = THREAD_TABLE_INDEX(win32_id);
      int hv;
      GC_thread p;
      DCL_LOCK_STATE;

      LOCK();
      for (p = GC_threads[hv_guess]; 0 != p; p = p -> tm.next) {
        if (THREAD_EQUAL(p -> pthread_id, id))
          goto foundit;
      }
      for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
        for (p = GC_threads[hv]; 0 != p; p = p -> tm.next) {
          if (THREAD_EQUAL(p -> pthread_id, id))
            goto foundit;
        }
      }
      p = 0;
     foundit:
      UNLOCK();
      return p;
    }
  }
#endif /* GC_PTHREADS */
#ifdef CAN_HANDLE_FORK
  /* Similar to that in pthread_support.c but also rehashes the table */
  /* since hash map key (thread_id) differs from that in the parent. */
  STATIC void GC_remove_all_threads_but_me(void)
  {
    int hv;
    GC_thread p, next, me = NULL;
    DWORD thread_id;
    pthread_t pthread_id = pthread_self(); /* same as in parent */

    GC_ASSERT(!GC_win32_dll_threads);
    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> tm.next;
        if (THREAD_EQUAL(p -> pthread_id, pthread_id)
            && me == NULL) { /* ignore dead threads with the same id */
          me = p;
          p -> tm.next = 0;
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if ((p -> flags & FINISHED) == 0) {
              /* Cannot call GC_destroy_thread_local here (see the */
              /* corresponding comment in pthread_support.c). */
              GC_remove_specific_after_fork(GC_thread_key, p -> pthread_id);
            }
#         endif
          if (&first_thread != p)
            GC_INTERNAL_FREE(p);
        }
      }
      GC_threads[hv] = NULL;
    }

    /* Put "me" back to GC_threads. */
    GC_ASSERT(me != NULL);
    thread_id = GetCurrentThreadId(); /* differs from that in parent */
    GC_threads[THREAD_TABLE_INDEX(thread_id)] = me;

    /* Update Win32 thread Id and handle. */
    me -> id = thread_id;
#   ifndef MSWINCE
      if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                           GetCurrentProcess(), (HANDLE *)&me->handle,
                           0 /* dwDesiredAccess */, FALSE /* bInheritHandle */,
                           DUPLICATE_SAME_ACCESS))
        ABORT("DuplicateHandle failed");
#   endif

#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_CUSTOM_SPECIFIC)
      /* For Cygwin, we need to re-assign thread-local pointer to */
      /* 'tlfs' (it is OK to call GC_destroy_thread_local and */
      /* GC_free_internal before this action). */
      if (GC_setspecific(GC_thread_key, &me->tlfs) != 0)
        ABORT("GC_setspecific failed (in child)");
#   endif
  }

  static void fork_prepare_proc(void)
  {
    LOCK();
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#   endif
    GC_wait_for_gc_completion(TRUE);
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_acquire_mark_lock();
#   endif
  }

  static void fork_parent_proc(void)
  {
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_release_mark_lock();
#   endif
    UNLOCK();
  }

  static void fork_child_proc(void)
  {
#   ifdef PARALLEL_MARK
      if (GC_parallel) {
        GC_release_mark_lock();
        GC_parallel = FALSE; /* or GC_markers_m1 = 0 */
                /* Turn off parallel marking in the child, since we are */
                /* probably just going to exec, and we would have to */
                /* restart mark threads. */
      }
#   endif
    GC_remove_all_threads_but_me();
    UNLOCK();
  }

  /* Routines for fork handling by client (no-op if pthread_atfork works). */
  GC_API void GC_CALL GC_atfork_prepare(void)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    if (GC_handle_fork <= 0)
      fork_prepare_proc();
  }

  GC_API void GC_CALL GC_atfork_parent(void)
  {
    if (GC_handle_fork <= 0)
      fork_parent_proc();
  }

  GC_API void GC_CALL GC_atfork_child(void)
  {
    if (GC_handle_fork <= 0)
      fork_child_proc();
  }
#endif /* CAN_HANDLE_FORK */
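/*
 * Illustrative usage sketch (not part of this file's implementation):
 * where the collector is built with fork handling but pthread_atfork()
 * cannot be relied upon (GC_handle_fork <= 0), the client brackets
 * fork() itself, e.g. under Cygwin:
 *
 *   GC_atfork_prepare();
 *   pid = fork();
 *   if (pid != 0) GC_atfork_parent(); else GC_atfork_child();
 */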
void GC_push_thread_structures(void)
{
  GC_ASSERT(I_HOLD_LOCK());
# ifndef GC_NO_THREADS_DISCOVERY
    if (GC_win32_dll_threads) {
      /* Unlike the other threads implementations, the thread table */
      /* here contains no pointers to the collectible heap (note also */
      /* that GC_PTHREADS is incompatible with DllMain-based thread */
      /* registration). Thus we have no private structures we need */
      /* to preserve. */
    } else
# endif
  /* else */ {
    GC_PUSH_ALL_SYM(GC_threads);
  }
# if defined(THREAD_LOCAL_ALLOC)
    GC_PUSH_ALL_SYM(GC_thread_key);
    /* Just in case we ever use our own TLS implementation. */
# endif
}

#ifdef WOW64_THREAD_CONTEXT_WORKAROUND
# ifndef CONTEXT_EXCEPTION_ACTIVE
#   define CONTEXT_EXCEPTION_ACTIVE    0x08000000
#   define CONTEXT_EXCEPTION_REQUEST   0x40000000
#   define CONTEXT_EXCEPTION_REPORTING 0x80000000
# endif
  static BOOL isWow64; /* Is running 32-bit code on Win64? */
# define GET_THREAD_CONTEXT_FLAGS (isWow64 \
                        ? CONTEXT_INTEGER | CONTEXT_CONTROL \
                          | CONTEXT_EXCEPTION_REQUEST | CONTEXT_SEGMENTS \
                        : CONTEXT_INTEGER | CONTEXT_CONTROL)
#else
# define GET_THREAD_CONTEXT_FLAGS (CONTEXT_INTEGER | CONTEXT_CONTROL)
#endif /* !WOW64_THREAD_CONTEXT_WORKAROUND */
/* Suspend the given thread, if it's still active. */
STATIC void GC_suspend(GC_thread t)
{
# ifdef RETRY_GET_THREAD_CONTEXT
    int retry_cnt = 0;
#   define MAX_SUSPEND_THREAD_RETRIES (1000 * 1000)
# endif

  UNPROTECT_THREAD(t);
  GC_acquire_dirty_lock();
# ifdef MSWINCE
    /* SuspendThread() will fail if thread is running kernel code. */
    while (SuspendThread(THREAD_HANDLE(t)) == (DWORD)-1)
      Sleep(10); /* in millis */
# elif defined(RETRY_GET_THREAD_CONTEXT)
    for (;;) {
      if (SuspendThread(t->handle) != (DWORD)-1) {
        CONTEXT context;

        context.ContextFlags = GET_THREAD_CONTEXT_FLAGS;
        if (GetThreadContext(t->handle, &context)) {
          /* TODO: WoW64 extra workaround: if CONTEXT_EXCEPTION_ACTIVE */
          /* then Sleep(1) and retry. */
          t->context_sp = copy_ptr_regs(t->context_regs, &context);
          break; /* success; the context pointer registers are saved */
        }

        /* Resume the thread, try to suspend it in a better location. */
        if (ResumeThread(t->handle) == (DWORD)-1)
          ABORT("ResumeThread failed");
      }
      if (retry_cnt > 1)
        Sleep(0); /* yield */
      if (++retry_cnt >= MAX_SUSPEND_THREAD_RETRIES)
        ABORT("SuspendThread loop failed"); /* something must be wrong */
    }
# else
    if (SuspendThread(t -> handle) == (DWORD)-1)
      ABORT("SuspendThread failed");
# endif
  t -> suspended = (unsigned char)TRUE;
  GC_release_dirty_lock();
  if (GC_on_thread_event)
    GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, THREAD_HANDLE(t));
}
  1144. #if defined(GC_ASSERTIONS) && (defined(MSWIN32) || defined(MSWINCE))
  1145. GC_INNER GC_bool GC_write_disabled = FALSE;
  1146. /* TRUE only if GC_stop_world() acquired GC_write_cs. */
  1147. #endif
  1148. /* Defined in misc.c */
  1149. extern CRITICAL_SECTION GC_write_cs;
  1150. GC_INNER void GC_stop_world(void)
  1151. {
  1152. DWORD thread_id = GetCurrentThreadId();
  1153. if (!GC_thr_initialized)
  1154. ABORT("GC_stop_world() called before GC_thr_init()");
  1155. GC_ASSERT(I_HOLD_LOCK());
  1156. /* This code is the same as in pthread_stop_world.c */
  1157. # ifdef PARALLEL_MARK
  1158. if (GC_parallel) {
  1159. GC_acquire_mark_lock();
  1160. GC_ASSERT(GC_fl_builder_count == 0);
  1161. /* We should have previously waited for it to become zero. */
  1162. }
  1163. # endif /* PARALLEL_MARK */
  1164. # if !defined(GC_NO_THREADS_DISCOVERY) || defined(GC_ASSERTIONS)
  1165. GC_please_stop = TRUE;
  1166. # endif
  1167. # ifndef CYGWIN32
  1168. # ifndef MSWIN_XBOX1
  1169. GC_ASSERT(!GC_write_disabled);
  1170. # endif
  1171. EnterCriticalSection(&GC_write_cs);
  1172. # endif
  1173. # if defined(GC_ASSERTIONS) && (defined(MSWIN32) || defined(MSWINCE))
1174. /* GC_printf() (and friends) must not be called from here until the */
1175. /* matching LeaveCriticalSection (the same applies, transitively, to */
1176. /* GC_suspend, GC_delete_gc_thread_no_free, GC_get_max_thread_index, */
1177. /* GC_size and GC_remove_protection). */
  1178. GC_write_disabled = TRUE;
  1179. # endif
  1180. # ifndef GC_NO_THREADS_DISCOVERY
  1181. if (GC_win32_dll_threads) {
  1182. int i;
  1183. int my_max;
  1184. /* Any threads being created during this loop will end up setting */
  1185. /* GC_attached_thread when they start. This will force marking */
  1186. /* to restart. This is not ideal, but hopefully correct. */
  1187. AO_store(&GC_attached_thread, FALSE);
  1188. my_max = (int)GC_get_max_thread_index();
  1189. for (i = 0; i <= my_max; i++) {
  1190. GC_vthread t = dll_thread_table + i;
  1191. if (t -> stack_base != 0 && t -> thread_blocked_sp == NULL
  1192. && t -> id != thread_id) {
  1193. GC_suspend((GC_thread)t);
  1194. }
  1195. }
  1196. } else
  1197. # endif
  1198. /* else */ {
  1199. GC_thread t;
  1200. int i;
  1201. for (i = 0; i < THREAD_TABLE_SZ; i++) {
  1202. for (t = GC_threads[i]; t != 0; t = t -> tm.next) {
  1203. if (t -> stack_base != 0 && t -> thread_blocked_sp == NULL
  1204. && !KNOWN_FINISHED(t) && t -> id != thread_id) {
  1205. GC_suspend(t);
  1206. }
  1207. }
  1208. }
  1209. }
  1210. # if defined(GC_ASSERTIONS) && (defined(MSWIN32) || defined(MSWINCE))
  1211. GC_write_disabled = FALSE;
  1212. # endif
  1213. # ifndef CYGWIN32
  1214. LeaveCriticalSection(&GC_write_cs);
  1215. # endif
  1216. # ifdef PARALLEL_MARK
  1217. if (GC_parallel)
  1218. GC_release_mark_lock();
  1219. # endif
  1220. }
  1221. GC_INNER void GC_start_world(void)
  1222. {
  1223. # ifdef GC_ASSERTIONS
  1224. DWORD thread_id = GetCurrentThreadId();
  1225. # endif
  1226. GC_ASSERT(I_HOLD_LOCK());
  1227. if (GC_win32_dll_threads) {
  1228. LONG my_max = GC_get_max_thread_index();
  1229. int i;
  1230. for (i = 0; i <= my_max; i++) {
  1231. GC_thread t = (GC_thread)(dll_thread_table + i);
  1232. if (t -> suspended) {
  1233. GC_ASSERT(t -> stack_base != 0 && t -> id != thread_id);
  1234. if (ResumeThread(THREAD_HANDLE(t)) == (DWORD)-1)
  1235. ABORT("ResumeThread failed");
  1236. t -> suspended = FALSE;
  1237. if (GC_on_thread_event)
  1238. GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, THREAD_HANDLE(t));
  1239. }
  1240. }
  1241. } else {
  1242. GC_thread t;
  1243. int i;
  1244. for (i = 0; i < THREAD_TABLE_SZ; i++) {
  1245. for (t = GC_threads[i]; t != 0; t = t -> tm.next) {
  1246. if (t -> suspended) {
  1247. GC_ASSERT(t -> stack_base != 0 && t -> id != thread_id);
  1248. if (ResumeThread(THREAD_HANDLE(t)) == (DWORD)-1)
  1249. ABORT("ResumeThread failed");
  1250. UNPROTECT_THREAD(t);
  1251. t -> suspended = FALSE;
  1252. if (GC_on_thread_event)
  1253. GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, THREAD_HANDLE(t));
  1254. }
  1255. }
  1256. }
  1257. }
  1258. # if !defined(GC_NO_THREADS_DISCOVERY) || defined(GC_ASSERTIONS)
  1259. GC_please_stop = FALSE;
  1260. # endif
  1261. }
  1262. #ifdef MSWINCE
  1263. /* The VirtualQuery calls below won't work properly on some old WinCE */
  1264. /* versions, but since each stack is restricted to an aligned 64 KiB */
  1265. /* region of virtual memory we can just take the next lowest multiple */
1266. /* of 64 KiB. The result of this macro must not be fed back to the */
1267. /* macro as its argument later, and must not be used as the lower */
1268. /* bound for an sp check (since the stack may be bigger than 64 KiB). */
  1269. # define GC_wince_evaluate_stack_min(s) \
  1270. (ptr_t)(((word)(s) - 1) & ~(word)0xFFFF)
  1271. #elif defined(GC_ASSERTIONS)
  1272. # define GC_dont_query_stack_min FALSE
  1273. #endif
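/* Worked example for GC_wince_evaluate_stack_min (values illustrative): */
/* for s == 0x0012A9F0 the macro yields 0x00120000; for s exactly on a   */
/* 64 KiB boundary, e.g. 0x00130000, it also yields 0x00120000 (the "-1" */
/* keeps the result strictly below s, which is also why feeding the      */
/* result back into the macro keeps descending by 64 KiB per step).      */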
  1274. /* A cache holding the results of the recent VirtualQuery call. */
  1275. /* Protected by the allocation lock. */
  1276. static ptr_t last_address = 0;
  1277. static MEMORY_BASIC_INFORMATION last_info;
  1278. /* Probe stack memory region (starting at "s") to find out its */
  1279. /* lowest address (i.e. stack top). */
  1280. /* S must be a mapped address inside the region, NOT the first */
  1281. /* unmapped address. */
  1282. STATIC ptr_t GC_get_stack_min(ptr_t s)
  1283. {
  1284. ptr_t bottom;
  1285. GC_ASSERT(I_HOLD_LOCK());
  1286. if (s != last_address) {
  1287. VirtualQuery(s, &last_info, sizeof(last_info));
  1288. last_address = s;
  1289. }
  1290. do {
  1291. bottom = (ptr_t)last_info.BaseAddress;
  1292. VirtualQuery(bottom - 1, &last_info, sizeof(last_info));
  1293. last_address = bottom - 1;
  1294. } while ((last_info.Protect & PAGE_READWRITE)
  1295. && !(last_info.Protect & PAGE_GUARD));
  1296. return(bottom);
  1297. }
  1298. /* Return true if the page at s has protections appropriate */
  1299. /* for a stack page. */
  1300. static GC_bool may_be_in_stack(ptr_t s)
  1301. {
  1302. GC_ASSERT(I_HOLD_LOCK());
  1303. if (s != last_address) {
  1304. VirtualQuery(s, &last_info, sizeof(last_info));
  1305. last_address = s;
  1306. }
  1307. return (last_info.Protect & PAGE_READWRITE)
  1308. && !(last_info.Protect & PAGE_GUARD);
  1309. }
  1310. /* Copy all registers that might point into the heap. Frame */
  1311. /* pointer registers are included in case client code was */
  1312. /* compiled with the 'omit frame pointer' optimization. */
1313. /* The context register values are stored into the regs argument, */
1314. /* which is expected to be exactly PUSHED_REGS_COUNT words long. */
1315. /* The function returns the context stack pointer value. */
  1316. static ptr_t copy_ptr_regs(word *regs, const CONTEXT *pcontext) {
  1317. ptr_t sp;
  1318. int cnt = 0;
  1319. # define context (*pcontext)
  1320. # define PUSH1(reg) (regs[cnt++] = (word)pcontext->reg)
  1321. # define PUSH2(r1,r2) (PUSH1(r1), PUSH1(r2))
  1322. # define PUSH4(r1,r2,r3,r4) (PUSH2(r1,r2), PUSH2(r3,r4))
  1323. # if defined(I386)
  1324. # ifdef WOW64_THREAD_CONTEXT_WORKAROUND
  1325. PUSH2(ContextFlags, SegFs); /* cannot contain pointers */
  1326. # endif
  1327. PUSH4(Edi,Esi,Ebx,Edx), PUSH2(Ecx,Eax), PUSH1(Ebp);
  1328. sp = (ptr_t)context.Esp;
  1329. # elif defined(X86_64)
  1330. PUSH4(Rax,Rcx,Rdx,Rbx); PUSH2(Rbp, Rsi); PUSH1(Rdi);
  1331. PUSH4(R8, R9, R10, R11); PUSH4(R12, R13, R14, R15);
  1332. sp = (ptr_t)context.Rsp;
  1333. # elif defined(ARM32)
  1334. PUSH4(R0,R1,R2,R3),PUSH4(R4,R5,R6,R7),PUSH4(R8,R9,R10,R11);
  1335. PUSH1(R12);
  1336. sp = (ptr_t)context.Sp;
  1337. # elif defined(AARCH64)
  1338. PUSH4(X0,X1,X2,X3),PUSH4(X4,X5,X6,X7),PUSH4(X8,X9,X10,X11);
  1339. PUSH4(X12,X13,X14,X15),PUSH4(X16,X17,X18,X19),PUSH4(X20,X21,X22,X23);
  1340. PUSH4(X24,X25,X26,X27),PUSH1(X28);
  1341. PUSH1(Lr);
  1342. sp = (ptr_t)context.Sp;
  1343. # elif defined(SHx)
  1344. PUSH4(R0,R1,R2,R3), PUSH4(R4,R5,R6,R7), PUSH4(R8,R9,R10,R11);
  1345. PUSH2(R12,R13), PUSH1(R14);
  1346. sp = (ptr_t)context.R15;
  1347. # elif defined(MIPS)
  1348. PUSH4(IntAt,IntV0,IntV1,IntA0), PUSH4(IntA1,IntA2,IntA3,IntT0);
  1349. PUSH4(IntT1,IntT2,IntT3,IntT4), PUSH4(IntT5,IntT6,IntT7,IntS0);
  1350. PUSH4(IntS1,IntS2,IntS3,IntS4), PUSH4(IntS5,IntS6,IntS7,IntT8);
  1351. PUSH4(IntT9,IntK0,IntK1,IntS8);
  1352. sp = (ptr_t)context.IntSp;
  1353. # elif defined(PPC)
  1354. PUSH4(Gpr0, Gpr3, Gpr4, Gpr5), PUSH4(Gpr6, Gpr7, Gpr8, Gpr9);
  1355. PUSH4(Gpr10,Gpr11,Gpr12,Gpr14), PUSH4(Gpr15,Gpr16,Gpr17,Gpr18);
  1356. PUSH4(Gpr19,Gpr20,Gpr21,Gpr22), PUSH4(Gpr23,Gpr24,Gpr25,Gpr26);
  1357. PUSH4(Gpr27,Gpr28,Gpr29,Gpr30), PUSH1(Gpr31);
  1358. sp = (ptr_t)context.Gpr1;
  1359. # elif defined(ALPHA)
  1360. PUSH4(IntV0,IntT0,IntT1,IntT2), PUSH4(IntT3,IntT4,IntT5,IntT6);
  1361. PUSH4(IntT7,IntS0,IntS1,IntS2), PUSH4(IntS3,IntS4,IntS5,IntFp);
  1362. PUSH4(IntA0,IntA1,IntA2,IntA3), PUSH4(IntA4,IntA5,IntT8,IntT9);
  1363. PUSH4(IntT10,IntT11,IntT12,IntAt);
  1364. sp = (ptr_t)context.IntSp;
  1365. # elif !defined(CPPCHECK)
  1366. # error Architecture is not supported
  1367. # endif
  1368. # undef context
  1369. GC_ASSERT(cnt == PUSHED_REGS_COUNT);
  1370. return sp;
  1371. }
  1372. STATIC word GC_push_stack_for(GC_thread thread, DWORD me)
  1373. {
  1374. ptr_t sp, stack_min;
  1375. struct GC_traced_stack_sect_s *traced_stack_sect =
  1376. thread -> traced_stack_sect;
  1377. if (thread -> id == me) {
  1378. GC_ASSERT(thread -> thread_blocked_sp == NULL);
  1379. sp = GC_approx_sp();
  1380. } else if ((sp = thread -> thread_blocked_sp) == NULL) {
  1381. /* Use saved sp value for blocked threads. */
  1382. int i = 0;
  1383. # ifdef RETRY_GET_THREAD_CONTEXT
  1384. /* We cache context when suspending the thread since it may */
  1385. /* require looping. */
  1386. word *regs = thread->context_regs;
  1387. if (thread->suspended) {
  1388. sp = thread->context_sp;
  1389. } else
  1390. # else
  1391. word regs[PUSHED_REGS_COUNT];
  1392. # endif
  1393. /* else */ {
  1394. CONTEXT context;
  1395. /* For unblocked threads call GetThreadContext(). */
  1396. context.ContextFlags = GET_THREAD_CONTEXT_FLAGS;
  1397. if (GetThreadContext(THREAD_HANDLE(thread), &context)) {
  1398. sp = copy_ptr_regs(regs, &context);
  1399. } else {
  1400. # ifdef RETRY_GET_THREAD_CONTEXT
  1401. /* At least, try to use the stale context if saved. */
  1402. sp = thread->context_sp;
  1403. if (NULL == sp) {
1404. /* Skip this thread for now; its stack will be pushed */
1405. /* anyway when the world is stopped. */
  1406. return 0;
  1407. }
  1408. # else
  1409. ABORT("GetThreadContext failed");
  1410. # endif
  1411. }
  1412. }
  1413. # ifdef THREAD_LOCAL_ALLOC
  1414. GC_ASSERT(thread->suspended || !GC_world_stopped);
  1415. # endif
  1416. # ifdef WOW64_THREAD_CONTEXT_WORKAROUND
  1417. i += 2; /* skip ContextFlags and SegFs */
  1418. # endif
  1419. for (; i < PUSHED_REGS_COUNT; i++)
  1420. GC_push_one(regs[i]);
  1421. # ifdef WOW64_THREAD_CONTEXT_WORKAROUND
  1422. /* WoW64 workaround. */
  1423. if (isWow64) {
  1424. DWORD ContextFlags = (DWORD)regs[0];
  1425. WORD SegFs = (WORD)regs[1];
  1426. if ((ContextFlags & CONTEXT_EXCEPTION_REPORTING) != 0
  1427. && (ContextFlags & (CONTEXT_EXCEPTION_ACTIVE
  1428. /* | CONTEXT_SERVICE_ACTIVE */)) != 0) {
  1429. PNT_TIB tib = thread->tib;
  1430. if (!tib) {
  1431. ABORT("TIB is invalid!");
  1432. }
  1433. # ifdef DEBUG_THREADS
  1434. GC_log_printf("TIB stack limit/base: %p .. %p\n",
  1435. (void *)tib->StackLimit, (void *)tib->StackBase);
  1436. # endif
  1437. GC_ASSERT(!((word)thread->stack_base
  1438. COOLER_THAN (word)tib->StackBase));
  1439. # ifdef UNITY_MISSING_COMMIT_5668de71
  1440. if (thread->stack_base != thread->initial_stack_base
  1441. /* We are in a coroutine. */
  1442. && ((word)thread->stack_base <= (word)tib->StackLimit
  1443. || (word)tib->StackBase < (word)thread->stack_base)) {
  1444. /* The coroutine stack is not within TIB stack. */
  1445. WARN("GetThreadContext might return stale register values"
  1446. " including ESP=%p\n", sp);
  1447. /* TODO: Because of WoW64 bug, there is no guarantee that */
  1448. /* sp really points to the stack top but, for now, we do */
  1449. /* our best as the TIB stack limit/base cannot be used */
  1450. /* while we are inside a coroutine. */
  1451. } else
  1452. # endif
  1453. {
  1454. /* GetThreadContext() might return stale register values, */
  1455. /* so we scan the entire stack region (down to the stack */
  1456. /* limit). There is no 100% guarantee that all the */
  1457. /* registers are pushed but we do our best (the proper */
  1458. /* solution would be to fix it inside Windows OS). */
  1459. sp = (ptr_t)tib->StackLimit;
  1460. }
  1461. } /* else */
  1462. # ifdef DEBUG_THREADS
  1463. else {
  1464. static GC_bool logged;
  1465. if (!logged
  1466. && (ContextFlags & CONTEXT_EXCEPTION_REPORTING) == 0) {
  1467. GC_log_printf("CONTEXT_EXCEPTION_REQUEST not supported\n");
  1468. logged = TRUE;
  1469. }
  1470. }
  1471. # endif
  1472. }
  1473. # endif /* WOW64_THREAD_CONTEXT_WORKAROUND */
  1474. } /* ! current thread */
  1475. /* Set stack_min to the lowest address in the thread stack, */
  1476. /* or to an address in the thread stack no larger than sp, */
  1477. /* taking advantage of the old value to avoid slow traversals */
  1478. /* of large stacks. */
  1479. if (thread -> last_stack_min == ADDR_LIMIT) {
  1480. # ifdef MSWINCE
  1481. if (GC_dont_query_stack_min) {
  1482. stack_min = GC_wince_evaluate_stack_min(traced_stack_sect != NULL ?
  1483. (ptr_t)traced_stack_sect : thread -> stack_base);
  1484. /* Keep last_stack_min value unmodified. */
  1485. } else
  1486. # endif
  1487. /* else */ {
  1488. stack_min = GC_get_stack_min(traced_stack_sect != NULL ?
  1489. (ptr_t)traced_stack_sect : thread -> stack_base);
  1490. UNPROTECT_THREAD(thread);
  1491. thread -> last_stack_min = stack_min;
  1492. }
  1493. } else {
  1494. /* First, adjust the latest known minimum stack address if we */
  1495. /* are inside GC_call_with_gc_active(). */
  1496. if (traced_stack_sect != NULL &&
  1497. (word)thread->last_stack_min > (word)traced_stack_sect) {
  1498. UNPROTECT_THREAD(thread);
  1499. thread -> last_stack_min = (ptr_t)traced_stack_sect;
  1500. }
  1501. if ((word)sp < (word)thread->stack_base
  1502. && (word)sp >= (word)thread->last_stack_min) {
  1503. stack_min = sp;
  1504. } else {
  1505. /* In the current thread it is always safe to use sp value. */
  1506. if (may_be_in_stack(thread -> id == me &&
  1507. (word)sp < (word)thread->last_stack_min ?
  1508. sp : thread -> last_stack_min)) {
  1509. stack_min = (ptr_t)last_info.BaseAddress;
  1510. /* Do not probe rest of the stack if sp is correct. */
  1511. if ((word)sp < (word)stack_min
  1512. || (word)sp >= (word)thread->stack_base)
  1513. stack_min = GC_get_stack_min(thread -> last_stack_min);
  1514. } else {
  1515. /* Stack shrunk? Is this possible? */
  1516. stack_min = GC_get_stack_min(thread -> stack_base);
  1517. }
  1518. UNPROTECT_THREAD(thread);
  1519. thread -> last_stack_min = stack_min;
  1520. }
  1521. }
  1522. GC_ASSERT(GC_dont_query_stack_min
  1523. || stack_min == GC_get_stack_min(thread -> stack_base)
  1524. || ((word)sp >= (word)stack_min
  1525. && (word)stack_min < (word)thread->stack_base
  1526. && (word)stack_min
  1527. > (word)GC_get_stack_min(thread -> stack_base)));
  1528. if ((word)sp >= (word)stack_min && (word)sp < (word)thread->stack_base) {
  1529. # ifdef DEBUG_THREADS
  1530. GC_log_printf("Pushing stack for 0x%x from sp %p to %p from 0x%x\n",
  1531. (int)thread->id, (void *)sp, (void *)thread->stack_base,
  1532. (int)me);
  1533. # endif
  1534. GC_push_all_stack_sections(sp, thread->stack_base, traced_stack_sect);
  1535. } else {
  1536. /* If not current thread then it is possible for sp to point to */
  1537. /* the guarded (untouched yet) page just below the current */
  1538. /* stack_min of the thread. */
  1539. if (thread -> id == me || (word)sp >= (word)thread->stack_base
  1540. || (word)(sp + GC_page_size) < (word)stack_min)
  1541. WARN("Thread stack pointer %p out of range, pushing everything\n",
  1542. sp);
  1543. # ifdef DEBUG_THREADS
  1544. GC_log_printf("Pushing stack for 0x%x from (min) %p to %p from 0x%x\n",
  1545. (int)thread->id, (void *)stack_min,
  1546. (void *)thread->stack_base, (int)me);
  1547. # endif
  1548. /* Push everything - ignore "traced stack section" data. */
  1549. GC_push_all_stack(stack_min, thread->stack_base);
  1550. }
  1551. return thread->stack_base - sp; /* stack grows down */
  1552. }
  1553. /* We hold allocation lock. Should do exactly the right thing if the */
  1554. /* world is stopped. Should not fail if it isn't. */
  1555. GC_INNER void GC_push_all_stacks(void)
  1556. {
  1557. DWORD thread_id = GetCurrentThreadId();
  1558. GC_bool found_me = FALSE;
  1559. # ifndef SMALL_CONFIG
  1560. unsigned nthreads = 0;
  1561. # endif
  1562. word total_size = 0;
  1563. # ifndef GC_NO_THREADS_DISCOVERY
  1564. if (GC_win32_dll_threads) {
  1565. int i;
  1566. LONG my_max = GC_get_max_thread_index();
  1567. for (i = 0; i <= my_max; i++) {
  1568. GC_thread t = (GC_thread)(dll_thread_table + i);
  1569. if (t -> tm.in_use && t -> stack_base) {
  1570. # ifndef SMALL_CONFIG
  1571. ++nthreads;
  1572. # endif
  1573. total_size += GC_push_stack_for(t, thread_id);
  1574. if (t -> id == thread_id) found_me = TRUE;
  1575. }
  1576. }
  1577. } else
  1578. # endif
  1579. /* else */ {
  1580. int i;
  1581. for (i = 0; i < THREAD_TABLE_SZ; i++) {
  1582. GC_thread t;
  1583. for (t = GC_threads[i]; t != 0; t = t -> tm.next) {
  1584. if (!KNOWN_FINISHED(t) && t -> stack_base) {
  1585. # ifndef SMALL_CONFIG
  1586. ++nthreads;
  1587. # endif
  1588. total_size += GC_push_stack_for(t, thread_id);
  1589. if (t -> id == thread_id) found_me = TRUE;
  1590. }
  1591. }
  1592. }
  1593. }
  1594. # ifndef SMALL_CONFIG
  1595. GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks%s\n", nthreads,
  1596. GC_win32_dll_threads ?
  1597. " based on DllMain thread tracking" : "");
  1598. # endif
  1599. if (!found_me && !GC_in_thread_creation)
  1600. ABORT("Collecting from unknown thread");
  1601. GC_total_stacksize = total_size;
  1602. }
  1603. #ifdef PARALLEL_MARK
  1604. # ifndef MAX_MARKERS
  1605. # define MAX_MARKERS 16
  1606. # endif
  1607. static ptr_t marker_sp[MAX_MARKERS - 1]; /* The cold end of the stack */
  1608. /* for markers. */
  1609. # ifdef IA64
  1610. static ptr_t marker_bsp[MAX_MARKERS - 1];
  1611. # endif
  1612. static ptr_t marker_last_stack_min[MAX_MARKERS - 1];
  1613. /* Last known minimum (hottest) address */
  1614. /* in stack (or ADDR_LIMIT if unset) */
  1615. /* for markers. */
  1616. #endif /* PARALLEL_MARK */
  1617. /* Find stack with the lowest address which overlaps the */
  1618. /* interval [start, limit). */
  1619. /* Return stack bounds in *lo and *hi. If no such stack */
  1620. /* is found, both *hi and *lo will be set to an address */
  1621. /* higher than limit. */
  1622. GC_INNER void GC_get_next_stack(char *start, char *limit,
  1623. char **lo, char **hi)
  1624. {
  1625. int i;
  1626. char * current_min = ADDR_LIMIT; /* Least in-range stack base */
  1627. ptr_t *plast_stack_min = NULL; /* Address of last_stack_min */
  1628. /* field for thread corresponding */
  1629. /* to current_min. */
  1630. GC_thread thread = NULL; /* Either NULL or points to the */
  1631. /* thread's hash table entry */
  1632. /* containing *plast_stack_min. */
  1633. /* First set current_min, ignoring limit. */
  1634. if (GC_win32_dll_threads) {
  1635. LONG my_max = GC_get_max_thread_index();
  1636. for (i = 0; i <= my_max; i++) {
  1637. ptr_t s = (ptr_t)(dll_thread_table[i].stack_base);
  1638. if ((word)s > (word)start && (word)s < (word)current_min) {
  1639. /* Update address of last_stack_min. */
  1640. plast_stack_min = (ptr_t * /* no volatile */)
  1641. &dll_thread_table[i].last_stack_min;
  1642. current_min = s;
  1643. # if defined(CPPCHECK)
  1644. /* To avoid a warning that thread is always null. */
  1645. thread = (GC_thread)&dll_thread_table[i];
  1646. # endif
  1647. }
  1648. }
  1649. } else {
  1650. for (i = 0; i < THREAD_TABLE_SZ; i++) {
  1651. GC_thread t;
  1652. for (t = GC_threads[i]; t != 0; t = t -> tm.next) {
  1653. ptr_t s = t -> stack_base;
  1654. if ((word)s > (word)start && (word)s < (word)current_min) {
  1655. /* Update address of last_stack_min. */
  1656. plast_stack_min = &t -> last_stack_min;
  1657. thread = t; /* Remember current thread to unprotect. */
  1658. current_min = s;
  1659. }
  1660. }
  1661. }
  1662. # ifdef PARALLEL_MARK
  1663. for (i = 0; i < GC_markers_m1; ++i) {
  1664. ptr_t s = marker_sp[i];
  1665. # ifdef IA64
  1666. /* FIXME: not implemented */
  1667. # endif
  1668. if ((word)s > (word)start && (word)s < (word)current_min) {
  1669. GC_ASSERT(marker_last_stack_min[i] != NULL);
  1670. plast_stack_min = &marker_last_stack_min[i];
  1671. current_min = s;
  1672. thread = NULL; /* Not a thread's hash table entry. */
  1673. }
  1674. }
  1675. # endif
  1676. }
  1677. *hi = current_min;
  1678. if (current_min == ADDR_LIMIT) {
  1679. *lo = ADDR_LIMIT;
  1680. return;
  1681. }
  1682. GC_ASSERT((word)current_min > (word)start && plast_stack_min != NULL);
  1683. # ifdef MSWINCE
  1684. if (GC_dont_query_stack_min) {
  1685. *lo = GC_wince_evaluate_stack_min(current_min);
  1686. /* Keep last_stack_min value unmodified. */
  1687. return;
  1688. }
  1689. # endif
  1690. if ((word)current_min > (word)limit && !may_be_in_stack(limit)) {
  1691. /* Skip the rest since the memory region at limit address is */
  1692. /* not a stack (so the lowest address of the found stack would */
  1693. /* be above the limit value anyway). */
  1694. *lo = ADDR_LIMIT;
  1695. return;
  1696. }
  1697. /* Get the minimum address of the found stack by probing its memory */
  1698. /* region starting from the recent known minimum (if set). */
  1699. if (*plast_stack_min == ADDR_LIMIT
  1700. || !may_be_in_stack(*plast_stack_min)) {
  1701. /* Unsafe to start from last_stack_min value. */
  1702. *lo = GC_get_stack_min(current_min);
  1703. } else {
  1704. /* Use the recent value to optimize search for min address. */
  1705. *lo = GC_get_stack_min(*plast_stack_min);
  1706. }
  1707. /* Remember current stack_min value. */
  1708. if (thread != NULL) {
  1709. UNPROTECT_THREAD(thread);
  1710. }
  1711. *plast_stack_min = *lo;
  1712. }
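/* Usage sketch: this is how a caller inside the collector typically    */
/* walks the thread stacks overlapping a region [region_start,          */
/* region_end), e.g. to exclude them from root scanning.  The region    */
/* variables are illustrative only.                                     */
#if 0
  {
    char *p = region_start;
    char *stack_lo, *stack_hi;

    while ((word)p < (word)region_end) {
      GC_get_next_stack(p, region_end, &stack_lo, &stack_hi);
      if ((word)stack_lo >= (word)region_end) break; /* no more stacks  */
      /* [stack_lo, stack_hi) is the lowest stack overlapping           */
      /* [p, region_end); handle the non-stack range [p, stack_lo),     */
      /* if non-empty, here.                                            */
      p = stack_hi;
    }
  }
#endif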
  1713. #ifdef PARALLEL_MARK
  1714. # if defined(GC_PTHREADS) && !defined(GC_PTHREADS_PARAMARK)
1715. /* Use the pthread-based parallel mark implementation, except on */
1716. /* MinGW, to work around a deadlock in winpthreads-3.0b internals */
1717. /* (observed with MinGW 32/64). */
  1718. # if !defined(__MINGW32__)
  1719. # define GC_PTHREADS_PARAMARK
  1720. # endif
  1721. # endif
  1722. # if !defined(GC_PTHREADS_PARAMARK)
  1723. STATIC HANDLE GC_marker_cv[MAX_MARKERS - 1] = {0};
  1724. /* Events with manual reset (one for each */
  1725. /* mark helper). */
  1726. STATIC DWORD GC_marker_Id[MAX_MARKERS - 1] = {0};
  1727. /* This table is used for mapping helper */
  1728. /* threads ID to mark helper index (linear */
  1729. /* search is used since the mapping contains */
  1730. /* only a few entries). */
  1731. # endif
  1732. /* GC_mark_thread() is the same as in pthread_support.c */
  1733. # ifdef GC_PTHREADS_PARAMARK
  1734. STATIC void * GC_mark_thread(void * id)
  1735. # elif defined(MSWINCE)
  1736. STATIC DWORD WINAPI GC_mark_thread(LPVOID id)
  1737. # else
  1738. STATIC unsigned __stdcall GC_mark_thread(void * id)
  1739. # endif
  1740. {
  1741. word my_mark_no = 0;
  1742. if ((word)id == (word)-1) return 0; /* to make compiler happy */
  1743. marker_sp[(word)id] = GC_approx_sp();
  1744. # ifdef IA64
  1745. marker_bsp[(word)id] = GC_save_regs_in_stack();
  1746. # endif
  1747. # if !defined(GC_PTHREADS_PARAMARK)
  1748. GC_marker_Id[(word)id] = GetCurrentThreadId();
  1749. # endif
  1750. /* Inform GC_start_mark_threads about completion of marker data init. */
  1751. GC_acquire_mark_lock();
  1752. if (0 == --GC_fl_builder_count) /* count may have a negative value */
  1753. GC_notify_all_builder();
  1754. for (;; ++my_mark_no) {
  1755. if (my_mark_no - GC_mark_no > (word)2) {
  1756. /* resynchronize if we get far off, e.g. because GC_mark_no */
  1757. /* wrapped. */
  1758. my_mark_no = GC_mark_no;
  1759. }
  1760. # ifdef DEBUG_THREADS
  1761. GC_log_printf("Starting mark helper for mark number %lu\n",
  1762. (unsigned long)my_mark_no);
  1763. # endif
  1764. GC_help_marker(my_mark_no);
  1765. }
  1766. }
  1767. # ifndef GC_ASSERTIONS
  1768. # define SET_MARK_LOCK_HOLDER (void)0
  1769. # define UNSET_MARK_LOCK_HOLDER (void)0
  1770. # endif
1771. /* GC_mark_threads[] is unused here, unlike in pthread_support.c. */
  1772. # ifdef CAN_HANDLE_FORK
  1773. static int available_markers_m1 = 0;
  1774. # else
  1775. # define available_markers_m1 GC_markers_m1
  1776. # endif
  1777. # ifdef GC_PTHREADS_PARAMARK
  1778. # include <pthread.h>
  1779. # if defined(GC_ASSERTIONS) && !defined(NUMERIC_THREAD_ID)
  1780. # define NUMERIC_THREAD_ID(id) (unsigned long)(word)GC_PTHREAD_PTRVAL(id)
  1781. /* Id not guaranteed to be unique. */
  1782. # endif
  1783. # ifdef CAN_HANDLE_FORK
  1784. static pthread_cond_t mark_cv;
  1785. /* initialized by GC_start_mark_threads_inner */
  1786. # else
  1787. static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
  1788. # endif
1789. /* GC_start_mark_threads is the same as in pthread_support.c except */
1790. /* that the default thread stack size is assumed to be large enough. */
  1791. GC_INNER void GC_start_mark_threads_inner(void)
  1792. {
  1793. int i;
  1794. pthread_attr_t attr;
  1795. pthread_t new_thread;
  1796. # ifndef NO_MARKER_SPECIAL_SIGMASK
  1797. sigset_t set, oldset;
  1798. # endif
  1799. GC_ASSERT(I_DONT_HOLD_LOCK());
  1800. if (available_markers_m1 <= 0) return;
1801. /* Skip if parallel markers are disabled or already started. */
  1802. # ifdef CAN_HANDLE_FORK
  1803. if (GC_parallel) return;
  1804. /* Reset mark_cv state after forking (as in pthread_support.c). */
  1805. {
  1806. pthread_cond_t mark_cv_local = PTHREAD_COND_INITIALIZER;
  1807. BCOPY(&mark_cv_local, &mark_cv, sizeof(mark_cv));
  1808. }
  1809. # endif
  1810. GC_ASSERT(GC_fl_builder_count == 0);
  1811. if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
  1812. if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
  1813. ABORT("pthread_attr_setdetachstate failed");
  1814. # ifndef NO_MARKER_SPECIAL_SIGMASK
1815. /* Apply a special signal mask to the GC marker threads so that */
1816. /* user-defined signals are not delivered to (and dropped by) them. */
  1817. if (sigfillset(&set) != 0)
  1818. ABORT("sigfillset failed");
  1819. if (pthread_sigmask(SIG_BLOCK, &set, &oldset) < 0) {
  1820. WARN("pthread_sigmask set failed, no markers started,"
  1821. " errno = %" WARN_PRIdPTR "\n", errno);
  1822. GC_markers_m1 = 0;
  1823. (void)pthread_attr_destroy(&attr);
  1824. return;
  1825. }
  1826. # endif /* !NO_MARKER_SPECIAL_SIGMASK */
  1827. # ifdef CAN_HANDLE_FORK
  1828. /* To have proper GC_parallel value in GC_help_marker. */
  1829. GC_markers_m1 = available_markers_m1;
  1830. # endif
  1831. for (i = 0; i < available_markers_m1; ++i) {
  1832. marker_last_stack_min[i] = ADDR_LIMIT;
  1833. if (0 != pthread_create(&new_thread, &attr,
  1834. GC_mark_thread, (void *)(word)i)) {
  1835. WARN("Marker thread creation failed\n", 0);
  1836. /* Don't try to create other marker threads. */
  1837. GC_markers_m1 = i;
  1838. break;
  1839. }
  1840. }
  1841. # ifndef NO_MARKER_SPECIAL_SIGMASK
  1842. /* Restore previous signal mask. */
  1843. if (pthread_sigmask(SIG_SETMASK, &oldset, NULL) < 0) {
  1844. WARN("pthread_sigmask restore failed, errno = %" WARN_PRIdPTR "\n",
  1845. errno);
  1846. }
  1847. # endif
  1848. (void)pthread_attr_destroy(&attr);
  1849. GC_wait_for_markers_init();
  1850. GC_COND_LOG_PRINTF("Started %d mark helper threads\n", GC_markers_m1);
  1851. }
  1852. # ifdef GC_ASSERTIONS
  1853. STATIC unsigned long GC_mark_lock_holder = NO_THREAD;
  1854. # define SET_MARK_LOCK_HOLDER \
  1855. (void)(GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self()))
  1856. # define UNSET_MARK_LOCK_HOLDER \
  1857. do { \
  1858. GC_ASSERT(GC_mark_lock_holder \
  1859. == NUMERIC_THREAD_ID(pthread_self())); \
  1860. GC_mark_lock_holder = NO_THREAD; \
  1861. } while (0)
  1862. # endif /* GC_ASSERTIONS */
  1863. static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
  1864. static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
  1865. /* GC_acquire/release_mark_lock(), GC_wait_builder/marker(), */
  1866. /* GC_wait_for_reclaim(), GC_notify_all_builder/marker() are the same */
  1867. /* as in pthread_support.c except that GC_generic_lock() is not used. */
  1868. # ifdef LOCK_STATS
  1869. volatile AO_t GC_block_count = 0;
  1870. # endif
  1871. GC_INNER void GC_acquire_mark_lock(void)
  1872. {
  1873. # if defined(NUMERIC_THREAD_ID_UNIQUE) && !defined(THREAD_SANITIZER)
  1874. GC_ASSERT(GC_mark_lock_holder != NUMERIC_THREAD_ID(pthread_self()));
  1875. # endif
  1876. if (pthread_mutex_lock(&mark_mutex) != 0) {
  1877. ABORT("pthread_mutex_lock failed");
  1878. }
  1879. # ifdef LOCK_STATS
  1880. (void)AO_fetch_and_add1(&GC_block_count);
  1881. # endif
  1882. /* GC_generic_lock(&mark_mutex); */
  1883. SET_MARK_LOCK_HOLDER;
  1884. }
  1885. GC_INNER void GC_release_mark_lock(void)
  1886. {
  1887. UNSET_MARK_LOCK_HOLDER;
  1888. if (pthread_mutex_unlock(&mark_mutex) != 0) {
  1889. ABORT("pthread_mutex_unlock failed");
  1890. }
  1891. }
1892. /* Collector must wait for free-list builders for 2 reasons: */
  1893. /* 1) Mark bits may still be getting examined without lock. */
  1894. /* 2) Partial free lists referenced only by locals may not be */
  1895. /* scanned correctly, e.g. if they contain "pointer-free" objects, */
  1896. /* since the free-list link may be ignored. */
  1897. STATIC void GC_wait_builder(void)
  1898. {
  1899. UNSET_MARK_LOCK_HOLDER;
  1900. if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
  1901. ABORT("pthread_cond_wait failed");
  1902. }
  1903. GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
  1904. SET_MARK_LOCK_HOLDER;
  1905. }
  1906. GC_INNER void GC_wait_for_reclaim(void)
  1907. {
  1908. GC_acquire_mark_lock();
  1909. while (GC_fl_builder_count > 0) {
  1910. GC_wait_builder();
  1911. }
  1912. GC_release_mark_lock();
  1913. }
  1914. GC_INNER void GC_notify_all_builder(void)
  1915. {
  1916. GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
  1917. if (pthread_cond_broadcast(&builder_cv) != 0) {
  1918. ABORT("pthread_cond_broadcast failed");
  1919. }
  1920. }
  1921. GC_INNER void GC_wait_marker(void)
  1922. {
  1923. GC_ASSERT(GC_parallel);
  1924. UNSET_MARK_LOCK_HOLDER;
  1925. if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
  1926. ABORT("pthread_cond_wait failed");
  1927. }
  1928. GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
  1929. SET_MARK_LOCK_HOLDER;
  1930. }
  1931. GC_INNER void GC_notify_all_marker(void)
  1932. {
  1933. GC_ASSERT(GC_parallel);
  1934. if (pthread_cond_broadcast(&mark_cv) != 0) {
  1935. ABORT("pthread_cond_broadcast failed");
  1936. }
  1937. }
  1938. # else /* ! GC_PTHREADS_PARAMARK */
  1939. # ifndef MARK_THREAD_STACK_SIZE
  1940. # define MARK_THREAD_STACK_SIZE 0 /* default value */
  1941. # endif
  1942. /* mark_mutex_event, builder_cv, mark_cv are initialized in GC_thr_init */
  1943. static HANDLE mark_mutex_event = (HANDLE)0; /* Event with auto-reset. */
  1944. static HANDLE builder_cv = (HANDLE)0; /* Event with manual reset. */
  1945. static HANDLE mark_cv = (HANDLE)0; /* Event with manual reset. */
  1946. GC_INNER void GC_start_mark_threads_inner(void)
  1947. {
  1948. int i;
  1949. GC_ASSERT(I_DONT_HOLD_LOCK());
  1950. if (available_markers_m1 <= 0) return;
  1951. GC_ASSERT(GC_fl_builder_count == 0);
  1952. /* Initialize GC_marker_cv[] fully before starting the */
  1953. /* first helper thread. */
  1954. for (i = 0; i < GC_markers_m1; ++i) {
  1955. if ((GC_marker_cv[i] = CreateEvent(NULL /* attrs */,
  1956. TRUE /* isManualReset */,
  1957. FALSE /* initialState */,
  1958. NULL /* name (A/W) */)) == (HANDLE)0)
  1959. ABORT("CreateEvent failed");
  1960. }
  1961. for (i = 0; i < GC_markers_m1; ++i) {
  1962. # if defined(MSWINCE) || defined(MSWIN_XBOX1)
  1963. HANDLE handle;
  1964. DWORD thread_id;
  1965. marker_last_stack_min[i] = ADDR_LIMIT;
  1966. /* There is no _beginthreadex() in WinCE. */
  1967. handle = CreateThread(NULL /* lpsa */,
  1968. MARK_THREAD_STACK_SIZE /* ignored */,
  1969. GC_mark_thread, (LPVOID)(word)i,
  1970. 0 /* fdwCreate */, &thread_id);
  1971. if (handle == NULL) {
  1972. WARN("Marker thread creation failed\n", 0);
  1973. /* The most probable failure reason is "not enough memory". */
  1974. /* Don't try to create other marker threads. */
  1975. break;
  1976. } else {
  1977. /* It's safe to detach the thread. */
  1978. CloseHandle(handle);
  1979. }
  1980. # else
  1981. GC_uintptr_t handle;
  1982. unsigned thread_id;
  1983. marker_last_stack_min[i] = ADDR_LIMIT;
  1984. handle = _beginthreadex(NULL /* security_attr */,
  1985. MARK_THREAD_STACK_SIZE, GC_mark_thread,
  1986. (void *)(word)i, 0 /* flags */, &thread_id);
  1987. if (!handle || handle == (GC_uintptr_t)-1L) {
  1988. WARN("Marker thread creation failed\n", 0);
  1989. /* Don't try to create other marker threads. */
  1990. break;
  1991. } else {/* We may detach the thread (if handle is of HANDLE type) */
  1992. /* CloseHandle((HANDLE)handle); */
  1993. }
  1994. # endif
  1995. }
1996. /* Adjust GC_markers_m1 (and free unused resources) if creation failed. */
  1997. while (GC_markers_m1 > i) {
  1998. GC_markers_m1--;
  1999. CloseHandle(GC_marker_cv[GC_markers_m1]);
  2000. }
  2001. GC_wait_for_markers_init();
  2002. GC_COND_LOG_PRINTF("Started %d mark helper threads\n", GC_markers_m1);
  2003. if (i == 0) {
  2004. CloseHandle(mark_cv);
  2005. CloseHandle(builder_cv);
  2006. CloseHandle(mark_mutex_event);
  2007. }
  2008. }
  2009. # ifdef GC_ASSERTIONS
  2010. STATIC DWORD GC_mark_lock_holder = NO_THREAD;
  2011. # define SET_MARK_LOCK_HOLDER \
  2012. (void)(GC_mark_lock_holder = GetCurrentThreadId())
  2013. # define UNSET_MARK_LOCK_HOLDER \
  2014. do { \
  2015. GC_ASSERT(GC_mark_lock_holder == GetCurrentThreadId()); \
  2016. GC_mark_lock_holder = NO_THREAD; \
  2017. } while (0)
  2018. # endif /* GC_ASSERTIONS */
  2019. STATIC /* volatile */ LONG GC_mark_mutex_state = 0;
  2020. /* Mutex state: 0 - unlocked, */
  2021. /* 1 - locked and no other waiters, */
  2022. /* -1 - locked and waiters may exist. */
  2023. /* Accessed by InterlockedExchange(). */
  2024. /* #define LOCK_STATS */
  2025. # ifdef LOCK_STATS
  2026. volatile AO_t GC_block_count = 0;
  2027. volatile AO_t GC_unlocked_count = 0;
  2028. # endif
  2029. GC_INNER void GC_acquire_mark_lock(void)
  2030. {
  2031. # ifndef THREAD_SANITIZER
  2032. GC_ASSERT(GC_mark_lock_holder != GetCurrentThreadId());
  2033. # endif
  2034. if (InterlockedExchange(&GC_mark_mutex_state, 1 /* locked */) != 0) {
  2035. # ifdef LOCK_STATS
  2036. (void)AO_fetch_and_add1(&GC_block_count);
  2037. # endif
2038. /* Repeatedly reset the state and wait until we acquire the lock. */
  2039. while (InterlockedExchange(&GC_mark_mutex_state,
  2040. -1 /* locked_and_has_waiters */) != 0) {
  2041. if (WaitForSingleObject(mark_mutex_event, INFINITE) == WAIT_FAILED)
  2042. ABORT("WaitForSingleObject failed");
  2043. }
  2044. }
  2045. # ifdef LOCK_STATS
  2046. else {
  2047. (void)AO_fetch_and_add1(&GC_unlocked_count);
  2048. }
  2049. # endif
  2050. GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
  2051. SET_MARK_LOCK_HOLDER;
  2052. }
  2053. GC_INNER void GC_release_mark_lock(void)
  2054. {
  2055. UNSET_MARK_LOCK_HOLDER;
  2056. if (InterlockedExchange(&GC_mark_mutex_state, 0 /* unlocked */) < 0) {
  2057. /* wake a waiter */
  2058. if (SetEvent(mark_mutex_event) == FALSE)
  2059. ABORT("SetEvent failed");
  2060. }
  2061. }
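/* Illustrative lock hand-off with the state values above: thread A      */
/* acquires (exchange to 1 returns 0); thread B tries (exchange to 1     */
/* returns 1), enters the contended path (exchange to -1 returns 1) and  */
/* waits on mark_mutex_event; A releases (exchange to 0 returns -1) and  */
/* signals the event; B wakes, its next exchange to -1 returns 0, so B   */
/* now holds the lock with the state conservatively left at -1 (a later  */
/* release signals the event again, harmlessly if nobody is waiting).    */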
  2062. /* In GC_wait_for_reclaim/GC_notify_all_builder() we emulate POSIX */
  2063. /* cond_wait/cond_broadcast() primitives with WinAPI Event object */
  2064. /* (working in "manual reset" mode). This works here because */
  2065. /* GC_notify_all_builder() is always called holding lock on */
  2066. /* mark_mutex and the checked condition (GC_fl_builder_count == 0) */
  2067. /* is the only one for which broadcasting on builder_cv is performed. */
  2068. GC_INNER void GC_wait_for_reclaim(void)
  2069. {
  2070. GC_ASSERT(builder_cv != 0);
  2071. for (;;) {
  2072. GC_acquire_mark_lock();
  2073. if (GC_fl_builder_count == 0)
  2074. break;
  2075. if (ResetEvent(builder_cv) == FALSE)
  2076. ABORT("ResetEvent failed");
  2077. GC_release_mark_lock();
  2078. if (WaitForSingleObject(builder_cv, INFINITE) == WAIT_FAILED)
  2079. ABORT("WaitForSingleObject failed");
  2080. }
  2081. GC_release_mark_lock();
  2082. }
  2083. GC_INNER void GC_notify_all_builder(void)
  2084. {
  2085. GC_ASSERT(GC_mark_lock_holder == GetCurrentThreadId());
  2086. GC_ASSERT(builder_cv != 0);
  2087. GC_ASSERT(GC_fl_builder_count == 0);
  2088. if (SetEvent(builder_cv) == FALSE)
  2089. ABORT("SetEvent failed");
  2090. }
  2091. /* mark_cv is used (for waiting) by a non-helper thread. */
  2092. GC_INNER void GC_wait_marker(void)
  2093. {
  2094. HANDLE event = mark_cv;
  2095. DWORD thread_id = GetCurrentThreadId();
  2096. int i = GC_markers_m1;
  2097. while (i-- > 0) {
  2098. if (GC_marker_Id[i] == thread_id) {
  2099. event = GC_marker_cv[i];
  2100. break;
  2101. }
  2102. }
  2103. if (ResetEvent(event) == FALSE)
  2104. ABORT("ResetEvent failed");
  2105. GC_release_mark_lock();
  2106. if (WaitForSingleObject(event, INFINITE) == WAIT_FAILED)
  2107. ABORT("WaitForSingleObject failed");
  2108. GC_acquire_mark_lock();
  2109. }
  2110. GC_INNER void GC_notify_all_marker(void)
  2111. {
  2112. DWORD thread_id = GetCurrentThreadId();
  2113. int i = GC_markers_m1;
  2114. while (i-- > 0) {
  2115. /* Notify every marker ignoring self (for efficiency). */
  2116. if (SetEvent(GC_marker_Id[i] != thread_id ? GC_marker_cv[i] :
  2117. mark_cv) == FALSE)
  2118. ABORT("SetEvent failed");
  2119. }
  2120. }
  2121. # endif /* ! GC_PTHREADS_PARAMARK */
  2122. #endif /* PARALLEL_MARK */
  2123. /* We have no DllMain to take care of new threads. Thus we */
  2124. /* must properly intercept thread creation. */
  2125. typedef struct {
  2126. LPTHREAD_START_ROUTINE start;
  2127. LPVOID param;
  2128. } thread_args;
  2129. STATIC void * GC_CALLBACK GC_win32_start_inner(struct GC_stack_base *sb,
  2130. void *arg)
  2131. {
  2132. void * ret = NULL;
  2133. LPTHREAD_START_ROUTINE start = ((thread_args *)arg)->start;
  2134. LPVOID param = ((thread_args *)arg)->param;
  2135. GC_register_my_thread(sb); /* This waits for an in-progress GC. */
  2136. # ifdef DEBUG_THREADS
  2137. GC_log_printf("thread 0x%lx starting...\n", (long)GetCurrentThreadId());
  2138. # endif
  2139. GC_free(arg);
  2140. /* Clear the thread entry even if we exit with an exception. */
  2141. /* This is probably pointless, since an uncaught exception is */
  2142. /* supposed to result in the process being killed. */
2143. # if !defined(__GNUC__) && !defined(NO_CRT)
  2144. __try
  2145. # endif
  2146. {
  2147. ret = (void *)(word)(*start)(param);
  2148. }
2149. # if !defined(__GNUC__) && !defined(NO_CRT)
  2150. __finally
  2151. # endif
  2152. {
  2153. GC_unregister_my_thread();
  2154. }
  2155. # ifdef DEBUG_THREADS
  2156. GC_log_printf("thread 0x%lx returned from start routine\n",
  2157. (long)GetCurrentThreadId());
  2158. # endif
  2159. return ret;
  2160. }
  2161. STATIC DWORD WINAPI GC_win32_start(LPVOID arg)
  2162. {
  2163. return (DWORD)(word)GC_call_with_stack_base(GC_win32_start_inner, arg);
  2164. }
  2165. GC_API HANDLE WINAPI GC_CreateThread(
  2166. LPSECURITY_ATTRIBUTES lpThreadAttributes,
  2167. GC_WIN32_SIZE_T dwStackSize,
  2168. LPTHREAD_START_ROUTINE lpStartAddress,
  2169. LPVOID lpParameter, DWORD dwCreationFlags,
  2170. LPDWORD lpThreadId)
  2171. {
  2172. if (!EXPECT(parallel_initialized, TRUE))
  2173. GC_init_parallel();
  2174. /* make sure GC is initialized (i.e. main thread is */
  2175. /* attached, tls initialized). */
  2176. # ifdef DEBUG_THREADS
  2177. GC_log_printf("About to create a thread from 0x%lx\n",
  2178. (long)GetCurrentThreadId());
  2179. # endif
  2180. if (GC_win32_dll_threads) {
  2181. return CreateThread(lpThreadAttributes, dwStackSize, lpStartAddress,
  2182. lpParameter, dwCreationFlags, lpThreadId);
  2183. } else {
  2184. thread_args *args =
  2185. (thread_args *)GC_malloc_uncollectable(sizeof(thread_args));
  2186. /* Handed off to and deallocated by child thread. */
  2187. HANDLE thread_h;
  2188. if (NULL == args) {
  2189. SetLastError(ERROR_NOT_ENOUGH_MEMORY);
  2190. return NULL;
  2191. }
  2192. /* set up thread arguments */
  2193. args -> start = lpStartAddress;
  2194. args -> param = lpParameter;
  2195. GC_dirty(args);
  2196. set_need_to_lock();
  2197. thread_h = CreateThread(lpThreadAttributes, dwStackSize, GC_win32_start,
  2198. args, dwCreationFlags, lpThreadId);
  2199. if (thread_h == 0) GC_free(args);
  2200. return thread_h;
  2201. }
  2202. }
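/* Usage sketch (client side): create a collector-aware thread when      */
/* DllMain-based thread registration is not in use.  The start routine   */
/* and its argument below are illustrative only.                         */
#if 0
  static DWORD WINAPI my_worker(LPVOID arg)
  {
    void **result = (void **)arg;

    *result = GC_MALLOC(64);        /* safe: this thread is registered   */
    return 0;
  }

  static HANDLE start_worker(void **result)
  {
    DWORD id;
    return GC_CreateThread(NULL /* lpThreadAttributes */, 0 /* stack */,
                           my_worker, result, 0 /* dwCreationFlags */, &id);
  }
#endif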
  2203. GC_API DECLSPEC_NORETURN void WINAPI GC_ExitThread(DWORD dwExitCode)
  2204. {
  2205. GC_unregister_my_thread();
  2206. ExitThread(dwExitCode);
  2207. }
  2208. # if !defined(NO_CRT) && !defined(CYGWIN32) && !defined(MSWINCE) && !defined(MSWIN_XBOX1)
  2209. GC_API GC_uintptr_t GC_CALL GC_beginthreadex(
  2210. void *security, unsigned stack_size,
  2211. unsigned (__stdcall *start_address)(void *),
  2212. void *arglist, unsigned initflag,
  2213. unsigned *thrdaddr)
  2214. {
  2215. if (!EXPECT(parallel_initialized, TRUE))
  2216. GC_init_parallel();
  2217. /* make sure GC is initialized (i.e. main thread is */
  2218. /* attached, tls initialized). */
  2219. # ifdef DEBUG_THREADS
  2220. GC_log_printf("About to create a thread from 0x%lx\n",
  2221. (long)GetCurrentThreadId());
  2222. # endif
  2223. if (GC_win32_dll_threads) {
  2224. return _beginthreadex(security, stack_size, start_address,
  2225. arglist, initflag, thrdaddr);
  2226. } else {
  2227. GC_uintptr_t thread_h;
  2228. thread_args *args =
  2229. (thread_args *)GC_malloc_uncollectable(sizeof(thread_args));
  2230. /* Handed off to and deallocated by child thread. */
  2231. if (NULL == args) {
  2232. /* MSDN docs say _beginthreadex() returns 0 on error and sets */
  2233. /* errno to either EAGAIN (too many threads) or EINVAL (the */
  2234. /* argument is invalid or the stack size is incorrect), so we */
  2235. /* set errno to EAGAIN on "not enough memory". */
  2236. errno = EAGAIN;
  2237. return 0;
  2238. }
  2239. /* set up thread arguments */
  2240. args -> start = (LPTHREAD_START_ROUTINE)start_address;
  2241. args -> param = arglist;
  2242. GC_dirty(args);
  2243. set_need_to_lock();
  2244. thread_h = _beginthreadex(security, stack_size,
  2245. (unsigned (__stdcall *)(void *))GC_win32_start,
  2246. args, initflag, thrdaddr);
  2247. if (thread_h == 0) GC_free(args);
  2248. return thread_h;
  2249. }
  2250. }
  2251. GC_API void GC_CALL GC_endthreadex(unsigned retval)
  2252. {
  2253. GC_unregister_my_thread();
  2254. _endthreadex(retval);
  2255. }
2256. # endif /* !NO_CRT && !CYGWIN32 && !MSWINCE && !MSWIN_XBOX1 */
  2257. #ifdef GC_WINMAIN_REDIRECT
  2258. /* This might be useful on WinCE. Shouldn't be used with GC_DLL. */
  2259. # if defined(MSWINCE) && defined(UNDER_CE)
  2260. # define WINMAIN_LPTSTR LPWSTR
  2261. # else
  2262. # define WINMAIN_LPTSTR LPSTR
  2263. # endif
  2264. /* This is defined in gc.h. */
  2265. # undef WinMain
  2266. /* Defined outside GC by an application. */
  2267. int WINAPI GC_WinMain(HINSTANCE, HINSTANCE, WINMAIN_LPTSTR, int);
  2268. typedef struct {
  2269. HINSTANCE hInstance;
  2270. HINSTANCE hPrevInstance;
  2271. WINMAIN_LPTSTR lpCmdLine;
  2272. int nShowCmd;
  2273. } main_thread_args;
  2274. static DWORD WINAPI main_thread_start(LPVOID arg)
  2275. {
  2276. main_thread_args * args = (main_thread_args *) arg;
  2277. return (DWORD)GC_WinMain(args->hInstance, args->hPrevInstance,
  2278. args->lpCmdLine, args->nShowCmd);
  2279. }
  2280. STATIC void * GC_waitForSingleObjectInfinite(void * handle)
  2281. {
  2282. return (void *)(word)WaitForSingleObject((HANDLE)handle, INFINITE);
  2283. }
  2284. # ifndef WINMAIN_THREAD_STACK_SIZE
  2285. # define WINMAIN_THREAD_STACK_SIZE 0 /* default value */
  2286. # endif
  2287. int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
  2288. WINMAIN_LPTSTR lpCmdLine, int nShowCmd)
  2289. {
  2290. DWORD exit_code = 1;
  2291. main_thread_args args = {
  2292. hInstance, hPrevInstance, lpCmdLine, nShowCmd
  2293. };
  2294. HANDLE thread_h;
  2295. DWORD thread_id;
  2296. /* initialize everything */
  2297. GC_INIT();
  2298. /* start the main thread */
  2299. thread_h = GC_CreateThread(NULL /* lpsa */,
  2300. WINMAIN_THREAD_STACK_SIZE /* ignored on WinCE */,
  2301. main_thread_start, &args, 0 /* fdwCreate */,
  2302. &thread_id);
  2303. if (thread_h != NULL) {
  2304. if ((DWORD)(word)GC_do_blocking(GC_waitForSingleObjectInfinite,
  2305. (void *)thread_h) == WAIT_FAILED)
  2306. ABORT("WaitForSingleObject(main_thread) failed");
2307. GetExitCodeThread(thread_h, &exit_code);
2308. CloseHandle(thread_h);
  2309. } else {
  2310. ABORT("GC_CreateThread(main_thread) failed");
  2311. }
  2312. # ifdef MSWINCE
  2313. GC_deinit();
  2314. # endif
  2315. return (int) exit_code;
  2316. }
  2317. #endif /* GC_WINMAIN_REDIRECT */
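/* Usage sketch (client side, only when building with GC_WINMAIN_REDIRECT): */
/* the application supplies its real entry point under the GC_WinMain name  */
/* (gc.h redirects WinMain to this name in client code), and the WinMain    */
/* defined above runs it in a collector-registered thread.  The body below  */
/* is illustrative only.                                                    */
#if 0
  int WINAPI GC_WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
                        WINMAIN_LPTSTR lpCmdLine, int nShowCmd)
  {
    /* ... normal application start-up ... */
    return 0;
  }
#endif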
  2318. # ifdef WOW64_THREAD_CONTEXT_WORKAROUND
  2319. # ifdef MSWINRT_FLAVOR
2320. /* Available on WinRT, but we must forward-declare it in order to use it. */
  2321. __declspec(dllimport) HMODULE WINAPI GetModuleHandleW(LPCWSTR lpModuleName);
  2322. # endif
  2323. STATIC BOOL is_wow64_process(void)
  2324. {
2325. /* Try to use IsWow64Process2(), as it handles the various WoW cases. */
  2326. HMODULE hWow64 = GetModuleHandleW(L"api-ms-win-core-wow64-l1-1-1.dll");
  2327. if (hWow64) {
  2328. FARPROC pfn = GetProcAddress(hWow64, "IsWow64Process2");
  2329. if (pfn) {
  2330. USHORT process_machine, native_machine;
  2331. if ((*(BOOL (WINAPI*)(HANDLE, USHORT*, USHORT*))pfn)(GetCurrentProcess(), &process_machine, &native_machine)) {
  2332. return (process_machine != native_machine);
  2333. }
  2334. }
  2335. }
  2336. {
  2337. BOOL is_wow64 = FALSE;
  2338. if (IsWow64Process(GetCurrentProcess(), &is_wow64))
  2339. return is_wow64;
  2340. }
  2341. return FALSE;
  2342. }
  2343. # endif
  2344. GC_INNER void GC_thr_init(void)
  2345. {
  2346. struct GC_stack_base sb;
  2347. # ifdef GC_ASSERTIONS
  2348. int sb_result;
  2349. # endif
  2350. GC_ASSERT(I_HOLD_LOCK());
  2351. if (GC_thr_initialized) return;
  2352. GC_ASSERT((word)&GC_threads % sizeof(word) == 0);
  2353. GC_main_thread = GetCurrentThreadId();
  2354. GC_thr_initialized = TRUE;
  2355. # ifdef CAN_HANDLE_FORK
  2356. /* Prepare for forks if requested. */
  2357. if (GC_handle_fork) {
  2358. # ifdef CAN_CALL_ATFORK
  2359. if (pthread_atfork(fork_prepare_proc, fork_parent_proc,
  2360. fork_child_proc) == 0) {
  2361. /* Handlers successfully registered. */
  2362. GC_handle_fork = 1;
  2363. } else
  2364. # endif
  2365. /* else */ if (GC_handle_fork != -1)
  2366. ABORT("pthread_atfork failed");
  2367. }
  2368. # endif
  2369. # ifdef WOW64_THREAD_CONTEXT_WORKAROUND
  2370. /* Set isWow64 flag. */
  2371. isWow64 = is_wow64_process();
  2372. # endif
  2373. /* Add the initial thread, so we can stop it. */
  2374. # ifdef GC_ASSERTIONS
  2375. sb_result =
  2376. # endif
  2377. GC_get_stack_base(&sb);
  2378. GC_ASSERT(sb_result == GC_SUCCESS);
  2379. # if defined(PARALLEL_MARK)
  2380. {
  2381. char * markers_string = GETENV("GC_MARKERS");
  2382. int markers;
  2383. if (markers_string != NULL) {
  2384. markers = atoi(markers_string);
  2385. if (markers <= 0 || markers > MAX_MARKERS) {
  2386. WARN("Too big or invalid number of mark threads: %" WARN_PRIdPTR
  2387. "; using maximum threads\n", (signed_word)markers);
  2388. markers = MAX_MARKERS;
  2389. }
  2390. } else {
  2391. # ifdef MSWINCE
  2392. /* There is no GetProcessAffinityMask() in WinCE. */
  2393. /* GC_sysinfo is already initialized. */
  2394. markers = (int)GC_sysinfo.dwNumberOfProcessors;
  2395. # else
  2396. # ifdef _WIN64
  2397. DWORD_PTR procMask = 0;
  2398. DWORD_PTR sysMask;
  2399. # else
  2400. DWORD procMask = 0;
  2401. DWORD sysMask;
  2402. # endif
  2403. int ncpu = 0;
  2404. if (
  2405. # ifdef __cplusplus
  2406. GetProcessAffinityMask(GetCurrentProcess(), &procMask, &sysMask)
  2407. # else
  2408. /* Cast args to void* for compatibility with some old SDKs. */
  2409. GetProcessAffinityMask(GetCurrentProcess(),
  2410. (void *)&procMask, (void *)&sysMask)
  2411. # endif
  2412. && procMask) {
  2413. do {
  2414. ncpu++;
  2415. } while ((procMask &= procMask - 1) != 0);
  2416. }
  2417. markers = ncpu;
  2418. # endif
  2419. # if defined(GC_MIN_MARKERS) && !defined(CPPCHECK)
  2420. /* This is primarily for testing on systems without getenv(). */
  2421. if (markers < GC_MIN_MARKERS)
  2422. markers = GC_MIN_MARKERS;
  2423. # endif
  2424. if (markers > MAX_MARKERS)
  2425. markers = MAX_MARKERS; /* silently limit the value */
  2426. }
  2427. available_markers_m1 = markers - 1;
  2428. }
  2429. /* Check whether parallel mode could be enabled. */
  2430. {
  2431. if (GC_win32_dll_threads || available_markers_m1 <= 0) {
  2432. /* Disable parallel marking. */
  2433. GC_parallel = FALSE;
  2434. GC_COND_LOG_PRINTF(
  2435. "Single marker thread, turning off parallel marking\n");
  2436. } else {
  2437. # ifndef GC_PTHREADS_PARAMARK
  2438. /* Initialize Win32 event objects for parallel marking. */
  2439. mark_mutex_event = CreateEvent(NULL /* attrs */,
  2440. FALSE /* isManualReset */,
  2441. FALSE /* initialState */, NULL /* name */);
  2442. builder_cv = CreateEvent(NULL /* attrs */,
  2443. TRUE /* isManualReset */,
  2444. FALSE /* initialState */, NULL /* name */);
  2445. mark_cv = CreateEvent(NULL /* attrs */, TRUE /* isManualReset */,
  2446. FALSE /* initialState */, NULL /* name */);
  2447. if (mark_mutex_event == (HANDLE)0 || builder_cv == (HANDLE)0
  2448. || mark_cv == (HANDLE)0)
  2449. ABORT("CreateEvent failed");
  2450. # endif
  2451. /* Disable true incremental collection, but generational is OK. */
  2452. GC_time_limit = GC_TIME_UNLIMITED;
  2453. }
  2454. }
  2455. # endif /* PARALLEL_MARK */
  2456. GC_ASSERT(0 == GC_lookup_thread_inner(GC_main_thread));
  2457. GC_register_my_thread_inner(&sb, GC_main_thread);
  2458. }
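/* Worked example (illustrative): with the environment variable          */
/* GC_MARKERS set to "4", markers == 4 above, so available_markers_m1    */
/* becomes 3 and at most 3 mark helper threads are started later; the    */
/* thread that triggers a collection also marks, giving 4 parallel       */
/* markers in total.  With GC_MARKERS unset, the count defaults to the   */
/* number of CPUs in the process affinity mask (capped at MAX_MARKERS).  */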
  2459. #ifdef GC_PTHREADS
  2460. struct start_info {
  2461. void *(*start_routine)(void *);
  2462. void *arg;
  2463. GC_bool detached;
  2464. };
  2465. GC_API int GC_pthread_join(pthread_t pthread_id, void **retval)
  2466. {
  2467. int result;
  2468. GC_thread t;
  2469. DCL_LOCK_STATE;
  2470. GC_ASSERT(!GC_win32_dll_threads);
  2471. # ifdef DEBUG_THREADS
  2472. GC_log_printf("thread %p(0x%lx) is joining thread %p\n",
  2473. (void *)GC_PTHREAD_PTRVAL(pthread_self()),
  2474. (long)GetCurrentThreadId(),
  2475. (void *)GC_PTHREAD_PTRVAL(pthread_id));
  2476. # endif
  2477. /* Thread being joined might not have registered itself yet. */
  2478. /* After the join, thread id may have been recycled. */
  2479. /* FIXME: It would be better if this worked more like */
  2480. /* pthread_support.c. */
  2481. # ifndef GC_WIN32_PTHREADS
  2482. while ((t = GC_lookup_pthread(pthread_id)) == 0)
  2483. Sleep(10);
  2484. # endif
  2485. result = pthread_join(pthread_id, retval);
  2486. if (0 == result) {
  2487. # ifdef GC_WIN32_PTHREADS
2488. /* pthreads-win32 and winpthreads thread ids are unique (not recycled). */
  2489. t = GC_lookup_pthread(pthread_id);
  2490. if (NULL == t) ABORT("Thread not registered");
  2491. # endif
  2492. LOCK();
  2493. if ((t -> flags & FINISHED) != 0) {
  2494. GC_delete_gc_thread_no_free(t);
  2495. GC_INTERNAL_FREE(t);
  2496. }
  2497. UNLOCK();
  2498. }
  2499. # ifdef DEBUG_THREADS
  2500. GC_log_printf("thread %p(0x%lx) join with thread %p %s\n",
  2501. (void *)GC_PTHREAD_PTRVAL(pthread_self()),
  2502. (long)GetCurrentThreadId(),
  2503. (void *)GC_PTHREAD_PTRVAL(pthread_id),
  2504. result != 0 ? "failed" : "succeeded");
  2505. # endif
  2506. return result;
  2507. }
2508. /* Cygwin-pthreads calls CreateThread internally, but it is not easily */
2509. /* interceptable by us, so we intercept pthread_create instead. */
  2510. GC_API int GC_pthread_create(pthread_t *new_thread,
  2511. GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
  2512. void *(*start_routine)(void *), void *arg)
  2513. {
  2514. int result;
  2515. struct start_info * si;
  2516. if (!EXPECT(parallel_initialized, TRUE))
  2517. GC_init_parallel();
  2518. /* make sure GC is initialized (i.e. main thread is attached) */
  2519. GC_ASSERT(!GC_win32_dll_threads);
  2520. /* This is otherwise saved only in an area mmapped by the thread */
  2521. /* library, which isn't visible to the collector. */
  2522. si = (struct start_info *)GC_malloc_uncollectable(
  2523. sizeof(struct start_info));
  2524. if (NULL == si)
  2525. return EAGAIN;
  2526. si -> start_routine = start_routine;
  2527. si -> arg = arg;
  2528. GC_dirty(si);
  2529. if (attr != 0 &&
  2530. pthread_attr_getdetachstate(attr, &si->detached)
  2531. == PTHREAD_CREATE_DETACHED) {
  2532. si->detached = TRUE;
  2533. }
  2534. # ifdef DEBUG_THREADS
  2535. GC_log_printf("About to create a thread from %p(0x%lx)\n",
  2536. (void *)GC_PTHREAD_PTRVAL(pthread_self()),
  2537. (long)GetCurrentThreadId());
  2538. # endif
  2539. set_need_to_lock();
  2540. result = pthread_create(new_thread, attr, GC_pthread_start, si);
  2541. if (result) { /* failure */
  2542. GC_free(si);
  2543. }
  2544. return(result);
  2545. }
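/* Usage sketch (client side, Cygwin/winpthreads builds): create and     */
/* join a collector-registered thread through the wrappers above (the    */
/* gc.h redirection macros normally map pthread_create/pthread_join to   */
/* these names).  The start routine below is illustrative only.          */
#if 0
  static void *my_start(void *arg)
  {
    (void)arg;
    return GC_MALLOC(32);           /* safe: the thread registers itself */
  }

  static int run_one_thread(void)
  {
    pthread_t tid;
    void *result;
    int err = GC_pthread_create(&tid, NULL /* attr */, my_start, NULL);

    if (0 == err)
      err = GC_pthread_join(tid, &result);
    return err;
  }
#endif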

  STATIC void * GC_CALLBACK GC_pthread_start_inner(struct GC_stack_base *sb,
                                                   void * arg)
  {
    struct start_info * si = (struct start_info *)arg;
    void * result;
    void *(*start)(void *);
    void *start_arg;
    DWORD thread_id = GetCurrentThreadId();
    pthread_t pthread_id = pthread_self();
    GC_thread me;
    DCL_LOCK_STATE;

#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p(0x%x) starting...\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_id), (int)thread_id);
#   endif

    GC_ASSERT(!GC_win32_dll_threads);
    /* If a GC occurs before the thread is registered, that GC will    */
    /* ignore this thread.  That's fine, since it will block trying to */
    /* acquire the allocation lock, and won't yet hold interesting     */
    /* pointers.                                                       */
    LOCK();
    /* We register the thread here instead of in the parent, so that    */
    /* we don't need to hold the allocation lock during pthread_create. */
    me = GC_register_my_thread_inner(sb, thread_id);
    SET_PTHREAD_MAP_CACHE(pthread_id, thread_id);
    GC_ASSERT(me != &first_thread);
    me -> pthread_id = pthread_id;
    if (si->detached) me -> flags |= DETACHED;
    UNLOCK();

    start = si -> start_routine;
    start_arg = si -> arg;
    GC_free(si); /* was allocated uncollectible */

    pthread_cleanup_push(GC_thread_exit_proc, (void *)me);
    result = (*start)(start_arg);
    me -> status = result;
    GC_dirty(me);
    pthread_cleanup_pop(1);

#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p(0x%x) returned from start routine\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_id), (int)thread_id);
#   endif
    return(result);
  }

  STATIC void * GC_pthread_start(void * arg)
  {
    return GC_call_with_stack_base(GC_pthread_start_inner, arg);
  }
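
  /* For comparison, a hedged sketch (not part of this file) of how a   */
  /* thread the collector did not create could attach itself through    */
  /* the same public stack-base callback used by GC_pthread_start       */
  /* above.  foreign_entry() and its argument are hypothetical, and the */
  /* client is assumed to have called GC_allow_register_threads() (or   */
  /* enabled thread discovery) beforehand.                              */
#if 0
  #include "gc.h"

  static void * GC_CALLBACK foreign_entry_inner(struct GC_stack_base *sb,
                                                void *arg)
  {
    void *result;

    (void)GC_register_my_thread(sb);    /* attach this thread to the GC   */
    result = GC_MALLOC(128);            /* now safe to use the GC heap    */
    (void)arg;
    (void)GC_unregister_my_thread();    /* detach before the thread exits */
    return result;
  }

  static void *foreign_entry(void *arg)
  {
    return GC_call_with_stack_base(foreign_entry_inner, arg);
  }
#endif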

  STATIC void GC_thread_exit_proc(void *arg)
  {
    GC_thread me = (GC_thread)arg;
    DCL_LOCK_STATE;

    GC_ASSERT(!GC_win32_dll_threads);
#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p(0x%lx) called pthread_exit()\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()),
                    (long)GetCurrentThreadId());
#   endif

    LOCK();
    GC_wait_for_gc_completion(FALSE);
#   if defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(GC_getspecific(GC_thread_key) == &me->tlfs);
      GC_destroy_thread_local(&(me->tlfs));
#   endif
    if (me -> flags & DETACHED) {
      GC_delete_thread(GetCurrentThreadId());
    } else {
      /* deallocate it as part of join */
      me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC)
      /* It is required to call remove_specific defined in specific.c. */
      GC_remove_specific(GC_thread_key);
#   endif
    UNLOCK();
  }

# ifndef GC_NO_PTHREAD_SIGMASK
    /* Win32 pthread does not support sigmask.  */
    /* So, nothing required here...             */
    GC_API int GC_pthread_sigmask(int how, const sigset_t *set,
                                  sigset_t *oset)
    {
      return pthread_sigmask(how, set, oset);
    }
# endif /* !GC_NO_PTHREAD_SIGMASK */

  GC_API int GC_pthread_detach(pthread_t thread)
  {
    int result;
    GC_thread t;
    DCL_LOCK_STATE;

    GC_ASSERT(!GC_win32_dll_threads);
    /* The thread might not have registered itself yet. */
    /* TODO: Wait for registration of the created thread in pthread_create. */
    while ((t = GC_lookup_pthread(thread)) == NULL)
      Sleep(10);
    result = pthread_detach(thread);
    if (result == 0) {
      LOCK();
      t -> flags |= DETACHED;
      /* Here the pthread id may have been recycled. */
      if ((t -> flags & FINISHED) != 0) {
        GC_delete_gc_thread_no_free(t);
        GC_INTERNAL_FREE(t);
      }
      UNLOCK();
    }
    return result;
  }
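
  /* Hedged sketch (not part of this file) of the detached path handled */
  /* by GC_thread_exit_proc and GC_pthread_detach above: a thread       */
  /* created detached has its GC_thread record deleted at exit, with no */
  /* join required.  spawn_detached() and fire_and_forget() are         */
  /* hypothetical names; pthread_create is assumed to be redirected to  */
  /* GC_pthread_create via "gc.h".                                      */
#if 0
  #include "gc.h"
  #include <pthread.h>

  static void *fire_and_forget(void *arg)
  {
    (void)arg;
    return NULL;
  }

  int spawn_detached(void)
  {
    pthread_t t;
    pthread_attr_t attr;
    int err;

    if (pthread_attr_init(&attr) != 0) return -1;
    (void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    err = pthread_create(&t, &attr, fire_and_forget, NULL); /* redirected */
    (void)pthread_attr_destroy(&attr);
    return err;
  }
#endif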

#elif !defined(GC_NO_THREADS_DISCOVERY)
    /* We avoid acquiring locks here, since this doesn't seem to be     */
    /* preemptible.  This may run with an uninitialized collector, in   */
    /* which case we don't do much.  This implies that no threads other */
    /* than the main one should be created with an uninitialized        */
    /* collector.  (The alternative of initializing the collector here  */
    /* seems dangerous, since DllMain is limited in what it can do.)    */

#   ifdef GC_INSIDE_DLL
      /* Export only if needed by client. */
      GC_API
#   else
#     define GC_DllMain DllMain
#   endif
    BOOL WINAPI GC_DllMain(HINSTANCE inst GC_ATTR_UNUSED, ULONG reason,
                           LPVOID reserved GC_ATTR_UNUSED)
    {
      DWORD thread_id;

      /* Note that GC_use_threads_discovery should be called by the     */
      /* client application at start-up to activate automatic thread    */
      /* registration (it is the default GC behavior since v7.0alpha7); */
      /* to always have automatic thread registration turned on, the GC */
      /* should be compiled with -D GC_DISCOVER_TASK_THREADS.           */
      if (!GC_win32_dll_threads && parallel_initialized) return TRUE;

      switch (reason) {
       case DLL_THREAD_ATTACH:
#       ifdef PARALLEL_MARK
          /* Don't register marker threads. */
          if (GC_parallel) {
            /* We could reach here only if parallel_initialized == FALSE. */
            break;
          }
#       endif
        /* FALLTHRU */
       case DLL_PROCESS_ATTACH:
        /* This may run with the collector uninitialized. */
        thread_id = GetCurrentThreadId();
        if (parallel_initialized && GC_main_thread != thread_id) {
#         ifdef PARALLEL_MARK
            ABORT("Cannot initialize parallel marker from DllMain");
#         else
            struct GC_stack_base sb;
            /* Don't lock here. */
#           ifdef GC_ASSERTIONS
              int sb_result =
#           endif
                        GC_get_stack_base(&sb);
            GC_ASSERT(sb_result == GC_SUCCESS);
            GC_register_my_thread_inner(&sb, thread_id);
#         endif
        } /* o.w. we already did it during GC_thr_init, called by GC_init */
        break;

       case DLL_THREAD_DETACH:
        /* We are hopefully running in the context of the exiting thread. */
        GC_ASSERT(parallel_initialized);
        if (GC_win32_dll_threads) {
          GC_delete_thread(GetCurrentThreadId());
        }
        break;

       case DLL_PROCESS_DETACH:
        if (GC_win32_dll_threads) {
          int i;
          int my_max = (int)GC_get_max_thread_index();

          for (i = 0; i <= my_max; ++i) {
            if (AO_load(&(dll_thread_table[i].tm.in_use)))
              GC_delete_gc_thread_no_free(&dll_thread_table[i]);
          }
          GC_deinit();
        }
        break;
      }
      return TRUE;
    }
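
    /* Hedged client-side sketch (not part of this file): to rely on the */
    /* DllMain-based registration above, the client calls                */
    /* GC_use_threads_discovery() before the collector is initialized    */
    /* (assuming the collector was built as a DLL and without            */
    /* GC_NO_THREADS_DISCOVERY).                                         */
#if 0
  #include "gc.h"

  int main(void)
  {
    GC_use_threads_discovery();  /* must precede collector initialization */
    GC_INIT();
    /* Threads created from here on are attached/detached via GC_DllMain. */
    return 0;
  }
#endif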

#endif /* !GC_NO_THREADS_DISCOVERY && !GC_PTHREADS */

/* Perform all initializations, including those that */
/* may require allocation.                            */
/* Called without allocation lock.                    */
/* Must be called before a second thread is created.  */
GC_INNER void GC_init_parallel(void)
{
# if defined(THREAD_LOCAL_ALLOC)
    GC_thread me;
    DCL_LOCK_STATE;
# endif

  if (parallel_initialized) return;
  parallel_initialized = TRUE;
  /* GC_init() calls us back, so set flag first. */

  if (!GC_is_initialized) GC_init();
# if defined(CPPCHECK) && !defined(GC_NO_THREADS_DISCOVERY)
    GC_noop1((word)&GC_DllMain);
# endif
  if (GC_win32_dll_threads) {
    set_need_to_lock();
      /* Cannot intercept thread creation.  Hence we don't know if */
      /* other threads exist.  However, client is not allowed to   */
      /* create other threads before collector initialization.     */
      /* Thus it's OK not to lock before this.                     */
  }

  /* Initialize thread local free lists if used. */
# if defined(THREAD_LOCAL_ALLOC)
    LOCK();
    me = GC_lookup_thread_inner(GetCurrentThreadId());
    CHECK_LOOKUP_MY_THREAD(me);
    GC_init_thread_local(&me->tlfs);
    UNLOCK();
# endif
}

#if defined(USE_PTHREAD_LOCKS)
  /* Support for pthread locking code.          */
  /* pthread_mutex_trylock may not win here,    */
  /* due to builtin support for spinning first? */
  GC_INNER void GC_lock(void)
  {
    pthread_mutex_lock(&GC_allocate_ml);
  }
#endif /* USE_PTHREAD_LOCKS */
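
/* For reference, a trylock-first variant of GC_lock would look like the */
/* sketch below; as the comment above suggests, it is not clearly a win  */
/* when the mutex implementation already spins before blocking.  This is */
/* an illustration only, not the variant used by this file.              */
#if 0
  GC_INNER void GC_lock(void)
  {
    if (pthread_mutex_trylock(&GC_allocate_ml) != 0)
      pthread_mutex_lock(&GC_allocate_ml);  /* fall back to blocking lock */
  }
#endif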

#if defined(THREAD_LOCAL_ALLOC)

  /* Add thread-local allocation support.  VC++ uses __declspec(thread). */

  /* We must explicitly mark ptrfree and gcj free lists, since the free  */
  /* list links wouldn't otherwise be found.  We also set them in the    */
  /* normal free lists, since that involves touching less memory than if */
  /* we scanned them normally.                                           */
  GC_INNER void GC_mark_thread_local_free_lists(void)
  {
    int i;
    GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> tm.next) {
        if (!KNOWN_FINISHED(p)) {
#         ifdef DEBUG_THREADS
            GC_log_printf("Marking thread locals for 0x%x\n", (int)p -> id);
#         endif
          GC_mark_thread_local_fls_for(&(p->tlfs));
        }
      }
    }
  }

# if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists are completely marked. */
    /* Also check that thread-specific-data structures are marked.   */
    void GC_check_tls(void)
    {
      int i;
      GC_thread p;

      for (i = 0; i < THREAD_TABLE_SZ; ++i) {
        for (p = GC_threads[i]; 0 != p; p = p -> tm.next) {
          if (!KNOWN_FINISHED(p))
            GC_check_tls_for(&(p->tlfs));
        }
      }
#     if defined(USE_CUSTOM_SPECIFIC)
        if (GC_thread_key != 0)
          GC_check_tsd_marks(GC_thread_key);
#     endif
    }
# endif /* GC_ASSERTIONS */

#endif /* THREAD_LOCAL_ALLOC ... */

# ifndef GC_NO_THREAD_REDIRECTS
    /* Restore thread calls redirection. */
#   define CreateThread GC_CreateThread
#   define ExitThread GC_ExitThread
#   undef _beginthreadex
#   define _beginthreadex GC_beginthreadex
#   undef _endthreadex
#   define _endthreadex GC_endthreadex
# endif /* !GC_NO_THREAD_REDIRECTS */
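
/* Hedged client-side sketch (not part of this file): with GC_THREADS    */
/* defined and "gc.h" included, the same redirection is assumed to apply */
/* in client code, so a plain CreateThread call below actually reaches   */
/* GC_CreateThread and the new thread is registered.  win_worker() and   */
/* run_win_worker() are hypothetical names.                              */
#if 0
  #define GC_THREADS
  #include "gc.h"
  #include <windows.h>

  static DWORD WINAPI win_worker(LPVOID arg)
  {
    (void)arg;
    (void)GC_MALLOC(64);        /* allocation visible to the collector */
    return 0;
  }

  int run_win_worker(void)
  {
    HANDLE h = CreateThread(NULL, 0, win_worker, NULL, 0, NULL);
                                /* redirected to GC_CreateThread */
    if (NULL == h) return -1;
    WaitForSingleObject(h, INFINITE);
    CloseHandle(h);
    return 0;
  }
#endif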

#endif /* GC_WIN32_THREADS */