/* darwin_stop_world.c */

/*
 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
 * Copyright (c) 2000-2010 by Hewlett-Packard Development Company.
 * All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/pthread_support.h"

/* This probably needs more porting work to ppc64. */

#if defined(GC_DARWIN_THREADS)

#include <sys/sysctl.h>
#include <mach/machine.h>
#include <CoreFoundation/CoreFoundation.h>

/* From "Inside Mac OS X - Mach-O Runtime Architecture" published by Apple
   Page 49:
   "The space beneath the stack pointer, where a new stack frame would normally
   be allocated, is called the red zone. This area as shown in Figure 3-2 may
   be used for any purpose as long as a new stack frame does not need to be
   added to the stack."
   Page 50: "If a leaf procedure's red zone usage would exceed 224 bytes, then
   it must set up a stack frame just like routines that call other routines."
*/
#ifdef POWERPC
# if CPP_WORDSZ == 32
#   define PPC_RED_ZONE_SIZE 224
# elif CPP_WORDSZ == 64
#   define PPC_RED_ZONE_SIZE 320
# endif
#endif
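
/* The red zone may hold live data even though it lies below the stack */
/* pointer, so when scanning a suspended PowerPC thread the lower      */
/* bound of its stack is taken one red zone below the saved r1; see    */
/* GC_stack_range_for() further down, which does:                      */
/*                                                                      */
/*   lo = (ptr_t)(state.THREAD_FLD(r1) - PPC_RED_ZONE_SIZE);            */
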
#ifndef DARWIN_DONT_PARSE_STACK

typedef struct StackFrame {
  unsigned long savedSP;
  unsigned long savedCR;
  unsigned long savedLR;
  unsigned long reserved[2];
  unsigned long savedRTOC;
} StackFrame;

GC_INNER ptr_t GC_FindTopOfStack(unsigned long stack_start)
{
  StackFrame *frame = (StackFrame *)stack_start;

  if (stack_start == 0) {
#   ifdef POWERPC
#     if CPP_WORDSZ == 32
        __asm__ __volatile__ ("lwz %0,0(r1)" : "=r" (frame));
#     else
        __asm__ __volatile__ ("ld %0,0(r1)" : "=r" (frame));
#     endif
#   elif defined(ARM32)
      volatile ptr_t sp_reg;
      __asm__ __volatile__ ("mov %0, r7\n" : "=r" (sp_reg));
      frame = (StackFrame *)sp_reg;
#   elif defined(AARCH64)
      volatile ptr_t sp_reg;
      __asm__ __volatile__ ("mov %0, x29\n" : "=r" (sp_reg));
      frame = (StackFrame *)sp_reg;
#   else
      ABORT("GC_FindTopOfStack(0) is not implemented");
#   endif
  }

# ifdef DEBUG_THREADS_EXTRA
    GC_log_printf("FindTopOfStack start at sp = %p\n", (void *)frame);
# endif
  while (frame->savedSP != 0) {
    /* if there are no more stack frames, stop */

    frame = (StackFrame *)frame->savedSP;

    /* we do these next two checks after going to the next frame
       because the LR for the first stack frame in the loop
       is not set up on purpose, so we shouldn't check it. */
    if ((frame->savedLR & ~0x3) == 0 || (frame->savedLR & ~0x3) == ~0x3UL)
      break; /* if the next LR is bogus, stop */
  }
# ifdef DEBUG_THREADS_EXTRA
    GC_log_printf("FindTopOfStack finish at sp = %p\n", (void *)frame);
# endif
  return (ptr_t)frame;
}

#endif /* !DARWIN_DONT_PARSE_STACK */
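
/* GC_FindTopOfStack() is used below either with a zero argument for    */
/* the current thread (the frame pointer is then read via the inline    */
/* assembly above) or with a frame/stack pointer taken from a suspended */
/* thread's Mach state.  It follows the chain of saved stack pointers   */
/* towards the outermost frame, and the result serves as the cold end   */
/* (*phi) of the region later passed to GC_push_all_stack().            */
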
/* GC_query_task_threads controls whether to obtain the list of */
/* the threads from the kernel or to use GC_threads table.      */
#ifdef GC_NO_THREADS_DISCOVERY
# define GC_query_task_threads FALSE
#elif defined(GC_DISCOVER_TASK_THREADS)
# define GC_query_task_threads TRUE
#else
  STATIC GC_bool GC_query_task_threads = FALSE;
#endif /* !GC_NO_THREADS_DISCOVERY */
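
/* Thus, defining GC_DISCOVER_TASK_THREADS at build time (e.g. passing  */
/* -DGC_DISCOVER_TASK_THREADS to the compiler) hard-wires discovery on, */
/* defining GC_NO_THREADS_DISCOVERY compiles it out entirely, and       */
/* otherwise the runtime flag defaults to FALSE and is turned on by     */
/* GC_use_threads_discovery() below.                                    */
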
/* Use implicit thread registration (all task threads excluding the GC */
/* special ones are stopped and scanned). Should be called before      */
/* GC_INIT() (or, at least, before going multi-threaded). Deprecated.  */
GC_API void GC_CALL GC_use_threads_discovery(void)
{
# if defined(GC_NO_THREADS_DISCOVERY) || defined(DARWIN_DONT_PARSE_STACK)
    ABORT("Darwin task-threads-based stop and push unsupported");
# else
#   ifndef GC_ALWAYS_MULTITHREADED
      GC_ASSERT(!GC_need_to_lock);
#   endif
#   ifndef GC_DISCOVER_TASK_THREADS
      GC_query_task_threads = TRUE;
#   endif
    GC_init_parallel(); /* just to be consistent with Win32 one */
# endif
}
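
/* A usage sketch (client code, not part of this file): a program that  */
/* wants every task thread stopped and scanned without registering      */
/* threads explicitly could call, before going multi-threaded:          */
/*                                                                      */
/*   GC_use_threads_discovery();                                        */
/*   GC_INIT();                                                         */
/*                                                                      */
/* where GC_INIT() is the usual public initialization macro from gc.h;  */
/* the ordering requirement comes from the comment above.               */
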
#ifndef kCFCoreFoundationVersionNumber_iOS_8_0
# define kCFCoreFoundationVersionNumber_iOS_8_0 1140.1
#endif
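
/* The constant above may be absent from older SDK headers, hence the   */
/* fallback definition; it is compared against the runtime value of     */
/* kCFCoreFoundationVersionNumber below to detect iOS 8 or newer when   */
/* choosing the ARM thread-state flavor.                                */
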
/* Evaluates the stack range for a given thread. Returns the lower */
/* bound and sets *phi to the upper one.                           */
STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
                                GC_bool thread_blocked, mach_port_t my_thread,
                                ptr_t *paltstack_lo,
                                ptr_t *paltstack_hi GC_ATTR_UNUSED)
{
  ptr_t lo;

  if (thread == my_thread) {
    GC_ASSERT(!thread_blocked);
    lo = GC_approx_sp();
#   ifndef DARWIN_DONT_PARSE_STACK
      *phi = GC_FindTopOfStack(0);
#   endif
  } else if (thread_blocked) {
#   if defined(CPPCHECK)
      if (NULL == p) ABORT("Invalid GC_thread passed to GC_stack_range_for");
#   endif
    lo = p->stop_info.stack_ptr;
#   ifndef DARWIN_DONT_PARSE_STACK
      *phi = p->topOfStack;
#   endif
  } else {
    /* MACHINE_THREAD_STATE_COUNT does not seem to be defined      */
    /* everywhere. Hence we use our own version. Alternatively,    */
    /* we could use THREAD_STATE_MAX (but it seems suboptimal).    */
    kern_return_t kern_result;
    GC_THREAD_STATE_T state;

#   if defined(ARM32) && defined(ARM_THREAD_STATE32)
      /* Use ARM_UNIFIED_THREAD_STATE on iOS8+ 32-bit targets and on */
      /* 64-bit H/W (iOS7+ 32-bit mode).                             */
      /* size must be set to the size of the output buffer on input. */
      size_t size = sizeof(cpu_type_t);
      static cpu_type_t cputype = 0;

      if (cputype == 0) {
        sysctlbyname("hw.cputype", &cputype, &size, NULL, 0);
      }
      if (cputype == CPU_TYPE_ARM64
          || kCFCoreFoundationVersionNumber
             >= kCFCoreFoundationVersionNumber_iOS_8_0) {
        arm_unified_thread_state_t unified_state;
        mach_msg_type_number_t unified_thread_state_count
                                        = ARM_UNIFIED_THREAD_STATE_COUNT;
#       if defined(CPPCHECK)
#         define GC_ARM_UNIFIED_THREAD_STATE 1
#       else
#         define GC_ARM_UNIFIED_THREAD_STATE ARM_UNIFIED_THREAD_STATE
#       endif
        kern_result = thread_get_state(thread, GC_ARM_UNIFIED_THREAD_STATE,
                                       (natural_t *)&unified_state,
                                       &unified_thread_state_count);
#       if !defined(CPPCHECK)
          if (unified_state.ash.flavor != ARM_THREAD_STATE32) {
            ABORT("unified_state flavor should be ARM_THREAD_STATE32");
          }
#       endif
        state = unified_state;
      } else
#   endif
    /* else */ {
      mach_msg_type_number_t thread_state_count = GC_MACH_THREAD_STATE_COUNT;

      do {
        /* Get the thread state (registers, etc) */
        kern_result = thread_get_state(thread, GC_MACH_THREAD_STATE,
                                       (natural_t *)&state,
                                       &thread_state_count);
      } while (kern_result == KERN_ABORTED);
    }
#   ifdef DEBUG_THREADS
      GC_log_printf("thread_get_state returns value = %d\n", kern_result);
#   endif
    if (kern_result != KERN_SUCCESS)
      ABORT("thread_get_state failed");

#   if defined(I386)
      lo = (ptr_t)state.THREAD_FLD(esp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(esp));
#     endif
      GC_push_one(state.THREAD_FLD(eax));
      GC_push_one(state.THREAD_FLD(ebx));
      GC_push_one(state.THREAD_FLD(ecx));
      GC_push_one(state.THREAD_FLD(edx));
      GC_push_one(state.THREAD_FLD(edi));
      GC_push_one(state.THREAD_FLD(esi));
      GC_push_one(state.THREAD_FLD(ebp));

#   elif defined(X86_64)
      lo = (ptr_t)state.THREAD_FLD(rsp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(rsp));
#     endif
      GC_push_one(state.THREAD_FLD(rax));
      GC_push_one(state.THREAD_FLD(rbx));
      GC_push_one(state.THREAD_FLD(rcx));
      GC_push_one(state.THREAD_FLD(rdx));
      GC_push_one(state.THREAD_FLD(rdi));
      GC_push_one(state.THREAD_FLD(rsi));
      GC_push_one(state.THREAD_FLD(rbp));
      /* GC_push_one(state.THREAD_FLD(rsp)); */
      GC_push_one(state.THREAD_FLD(r8));
      GC_push_one(state.THREAD_FLD(r9));
      GC_push_one(state.THREAD_FLD(r10));
      GC_push_one(state.THREAD_FLD(r11));
      GC_push_one(state.THREAD_FLD(r12));
      GC_push_one(state.THREAD_FLD(r13));
      GC_push_one(state.THREAD_FLD(r14));
      GC_push_one(state.THREAD_FLD(r15));

#   elif defined(POWERPC)
      lo = (ptr_t)(state.THREAD_FLD(r1) - PPC_RED_ZONE_SIZE);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(r1));
#     endif
      GC_push_one(state.THREAD_FLD(r0));
      GC_push_one(state.THREAD_FLD(r2));
      GC_push_one(state.THREAD_FLD(r3));
      GC_push_one(state.THREAD_FLD(r4));
      GC_push_one(state.THREAD_FLD(r5));
      GC_push_one(state.THREAD_FLD(r6));
      GC_push_one(state.THREAD_FLD(r7));
      GC_push_one(state.THREAD_FLD(r8));
      GC_push_one(state.THREAD_FLD(r9));
      GC_push_one(state.THREAD_FLD(r10));
      GC_push_one(state.THREAD_FLD(r11));
      GC_push_one(state.THREAD_FLD(r12));
      GC_push_one(state.THREAD_FLD(r13));
      GC_push_one(state.THREAD_FLD(r14));
      GC_push_one(state.THREAD_FLD(r15));
      GC_push_one(state.THREAD_FLD(r16));
      GC_push_one(state.THREAD_FLD(r17));
      GC_push_one(state.THREAD_FLD(r18));
      GC_push_one(state.THREAD_FLD(r19));
      GC_push_one(state.THREAD_FLD(r20));
      GC_push_one(state.THREAD_FLD(r21));
      GC_push_one(state.THREAD_FLD(r22));
      GC_push_one(state.THREAD_FLD(r23));
      GC_push_one(state.THREAD_FLD(r24));
      GC_push_one(state.THREAD_FLD(r25));
      GC_push_one(state.THREAD_FLD(r26));
      GC_push_one(state.THREAD_FLD(r27));
      GC_push_one(state.THREAD_FLD(r28));
      GC_push_one(state.THREAD_FLD(r29));
      GC_push_one(state.THREAD_FLD(r30));
      GC_push_one(state.THREAD_FLD(r31));

#   elif defined(ARM32)
      lo = (ptr_t)state.THREAD_FLD(sp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(r[7])); /* fp */
#     endif
      {
        int j;
        for (j = 0; j < 7; j++)
          GC_push_one(state.THREAD_FLD(r[j]));
        j++; /* "r7" is skipped (iOS uses it as a frame pointer) */
        for (; j <= 12; j++)
          GC_push_one(state.THREAD_FLD(r[j]));
      }
      /* "cpsr", "pc" and "sp" are skipped */
      GC_push_one(state.THREAD_FLD(lr));

#   elif defined(AARCH64)
      lo = (ptr_t)state.THREAD_FLD(sp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(fp));
#     endif
      {
        int j;
        for (j = 0; j <= 28; j++) {
          GC_push_one(state.THREAD_FLD(x[j]));
        }
      }
      /* "cpsr", "fp", "pc" and "sp" are skipped */
      GC_push_one(state.THREAD_FLD(lr));

#   elif defined(CPPCHECK)
      lo = NULL;
#   else
#     error FIXME for non-arm/ppc/x86 architectures
#   endif
  } /* thread != my_thread */

# ifdef DARWIN_DONT_PARSE_STACK
    /* p is guaranteed to be non-NULL regardless of GC_query_task_threads. */
    *phi = (p->flags & MAIN_THREAD) != 0 ? GC_stackbottom : p->stack_end;
# endif

  /* TODO: Determine p and handle altstack if !DARWIN_DONT_PARSE_STACK */
# ifdef DARWIN_DONT_PARSE_STACK
  if (p->altstack != NULL && (word)p->altstack <= (word)lo
      && (word)lo <= (word)p->altstack + p->altstack_size) {
    *paltstack_lo = lo;
    *paltstack_hi = p->altstack + p->altstack_size;
    lo = p->stack;
    *phi = p->stack + p->stack_size;
  } else
# endif
  /* else */ {
    *paltstack_lo = NULL;
  }
# ifdef DEBUG_THREADS
    GC_log_printf("Darwin: Stack for thread %p = [%p,%p)\n",
                  (void *)(word)thread, (void *)lo, (void *)(*phi));
# endif
  return lo;
}
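
/* To summarize the contract of GC_stack_range_for(): the lower bound   */
/* (hot end) of the stack is returned and the upper bound (cold end) is */
/* stored into *phi; in the DARWIN_DONT_PARSE_STACK configuration, if   */
/* the thread is currently running on its alternate signal stack, the   */
/* active part of that stack is reported via *paltstack_lo and          */
/* *paltstack_hi while the thread's registered stack range is returned, */
/* so the caller pushes both regions.                                   */
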
GC_INNER void GC_push_all_stacks(void)
{
  ptr_t hi, altstack_lo, altstack_hi;
  task_t my_task = current_task();
  mach_port_t my_thread = mach_thread_self();
  GC_bool found_me = FALSE;
  int nthreads = 0;
  word total_size = 0;
  mach_msg_type_number_t listcount = (mach_msg_type_number_t)THREAD_TABLE_SZ;

  if (!EXPECT(GC_thr_initialized, TRUE))
    GC_thr_init();

# ifndef DARWIN_DONT_PARSE_STACK
  if (GC_query_task_threads) {
    int i;
    kern_return_t kern_result;
    thread_act_array_t act_list = 0;

    /* Obtain the list of the threads from the kernel. */
    kern_result = task_threads(my_task, &act_list, &listcount);
    if (kern_result != KERN_SUCCESS)
      ABORT("task_threads failed");

    for (i = 0; i < (int)listcount; i++) {
      thread_act_t thread = act_list[i];
      ptr_t lo = GC_stack_range_for(&hi, thread, NULL, FALSE, my_thread,
                                    &altstack_lo, &altstack_hi);

      if (lo) {
        GC_ASSERT((word)lo <= (word)hi);
        total_size += hi - lo;
        GC_push_all_stack(lo, hi);
      }
      /* TODO: Handle altstack */
      nthreads++;
      if (thread == my_thread)
        found_me = TRUE;
      mach_port_deallocate(my_task, thread);
    } /* for (i=0; ...) */

    vm_deallocate(my_task, (vm_address_t)act_list,
                  sizeof(thread_t) * listcount);
  } else
# endif /* !DARWIN_DONT_PARSE_STACK */
  /* else */ {
    int i;

    for (i = 0; i < (int)listcount; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->next)
        if ((p->flags & FINISHED) == 0) {
          thread_act_t thread = (thread_act_t)p->stop_info.mach_thread;
          ptr_t lo = GC_stack_range_for(&hi, thread, p,
                                        (GC_bool)p->thread_blocked,
                                        my_thread, &altstack_lo,
                                        &altstack_hi);

          if (lo) {
            GC_ASSERT((word)lo <= (word)hi);
            total_size += hi - lo;
            GC_push_all_stack_sections(lo, hi, p->traced_stack_sect);
          }
          if (altstack_lo) {
            total_size += altstack_hi - altstack_lo;
            GC_push_all_stack(altstack_lo, altstack_hi);
          }
          nthreads++;
          if (thread == my_thread)
            found_me = TRUE;
        }
    } /* for (i=0; ...) */
  }

  mach_port_deallocate(my_task, my_thread);
  GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n", nthreads);
  if (!found_me && !GC_in_thread_creation)
    ABORT("Collecting from unknown thread");
  GC_total_stacksize = total_size;
}
#ifndef GC_NO_THREADS_DISCOVERY

# ifdef MPROTECT_VDB
    STATIC mach_port_t GC_mach_handler_thread = 0;
    STATIC GC_bool GC_use_mach_handler_thread = FALSE;

    GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread)
    {
      GC_mach_handler_thread = thread;
      GC_use_mach_handler_thread = TRUE;
    }
# endif /* MPROTECT_VDB */

# ifndef GC_MAX_MACH_THREADS
#   define GC_MAX_MACH_THREADS THREAD_TABLE_SZ
# endif

  struct GC_mach_thread {
    thread_act_t thread;
    GC_bool already_suspended;
  };

  struct GC_mach_thread GC_mach_threads[GC_MAX_MACH_THREADS];
  STATIC int GC_mach_threads_count = 0;
  /* FIXME: it is better to implement GC_mach_threads as a hash set. */
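
  /* The lookups in GC_suspend_thread_list() and GC_start_world() scan  */
  /* this array linearly, but start from the index that matched on the  */
  /* previous iteration; when task_threads() reports the threads in a   */
  /* stable order, this makes the search nearly constant-time per       */
  /* thread, and only in the worst case does it degenerate to a full    */
  /* scan, which is what the FIXME above (about a hash set) refers to.  */
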
/* Returns TRUE if there is a thread in act_list that was not in old_list. */
STATIC GC_bool GC_suspend_thread_list(thread_act_array_t act_list, int count,
                                      thread_act_array_t old_list,
                                      int old_count, mach_port_t my_thread)
{
  int i;
  int j = -1;
  GC_bool changed = FALSE;

  for (i = 0; i < count; i++) {
    thread_act_t thread = act_list[i];
    GC_bool found;
    struct thread_basic_info info;
    mach_msg_type_number_t outCount;
    kern_return_t kern_result;

    if (thread == my_thread
#       ifdef MPROTECT_VDB
          || (GC_mach_handler_thread == thread && GC_use_mach_handler_thread)
#       endif
        ) {
      /* Don't add our own thread or the handler thread. */
      continue;
    }
#   ifdef PARALLEL_MARK
      if (GC_is_mach_marker(thread))
        continue; /* ignore the parallel marker threads */
#   endif

#   ifdef DEBUG_THREADS
      GC_log_printf("Attempting to suspend thread %p\n",
                    (void *)(word)thread);
#   endif
    /* find the current thread in the old list */
    found = FALSE;
    {
      int last_found = j; /* remember the previous found thread index */

      /* Search for the thread starting from the last found one first. */
      while (++j < old_count)
        if (old_list[j] == thread) {
          found = TRUE;
          break;
        }
      if (!found) {
        /* If not found, search in the rest (beginning) of the list. */
        for (j = 0; j < last_found; j++)
          if (old_list[j] == thread) {
            found = TRUE;
            break;
          }

        if (!found) {
          /* add it to the GC_mach_threads list */
          if (GC_mach_threads_count == GC_MAX_MACH_THREADS)
            ABORT("Too many threads");
          GC_mach_threads[GC_mach_threads_count].thread = thread;
          /* default is not suspended */
          GC_mach_threads[GC_mach_threads_count].already_suspended = FALSE;
          changed = TRUE;
        }
      }
    }

    outCount = THREAD_INFO_MAX;
    kern_result = thread_info(thread, THREAD_BASIC_INFO,
                              (thread_info_t)&info, &outCount);
    if (kern_result != KERN_SUCCESS) {
      /* The thread may have quit since the task_threads() call; mark */
      /* it as already suspended so it is not dealt with later.       */
      if (!found)
        GC_mach_threads[GC_mach_threads_count++].already_suspended = TRUE;
      continue;
    }
#   ifdef DEBUG_THREADS
      GC_log_printf("Thread state for %p = %d\n", (void *)(word)thread,
                    info.run_state);
#   endif
    if (info.suspend_count != 0) {
      /* thread is already suspended. */
      if (!found)
        GC_mach_threads[GC_mach_threads_count++].already_suspended = TRUE;
      continue;
    }

#   ifdef DEBUG_THREADS
      GC_log_printf("Suspending %p\n", (void *)(word)thread);
#   endif
    /* Unconditionally suspend the thread. It will do no    */
    /* harm if it is already suspended by the client logic. */
    GC_acquire_dirty_lock();
    do {
      kern_result = thread_suspend(thread);
    } while (kern_result == KERN_ABORTED);
    GC_release_dirty_lock();
    if (kern_result != KERN_SUCCESS) {
      /* The thread may have quit since the task_threads() call; mark */
      /* it as already suspended so it is not dealt with later.       */
      if (!found)
        GC_mach_threads[GC_mach_threads_count++].already_suspended = TRUE;
      continue;
    }
    if (!found)
      GC_mach_threads_count++;
    if (GC_on_thread_event)
      GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, (void *)(word)thread);
  }
  return changed;
}

#endif /* !GC_NO_THREADS_DISCOVERY */
/* Caller holds allocation lock. */
GC_INNER void GC_stop_world(void)
{
  task_t my_task = current_task();
  mach_port_t my_thread = mach_thread_self();
  kern_return_t kern_result;

# ifdef DEBUG_THREADS
    GC_log_printf("Stopping the world from thread %p\n",
                  (void *)(word)my_thread);
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel) {
      /* Make sure all free list construction has stopped before we   */
      /* start. No new construction can start, since free list        */
      /* construction is required to acquire and release the GC lock  */
      /* before it starts, and we have the lock.                      */
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
    }
# endif /* PARALLEL_MARK */

  if (GC_query_task_threads) {
#   ifndef GC_NO_THREADS_DISCOVERY
      unsigned i;
      GC_bool changed;
      thread_act_array_t act_list, prev_list;
      mach_msg_type_number_t listcount, prevcount;

      /* Clear out the mach threads list table. We do not need to      */
      /* really clear GC_mach_threads[] as it is used only in the range */
      /* from 0 to GC_mach_threads_count-1, inclusive.                  */
      GC_mach_threads_count = 0;

      /* Loop stopping threads until you have gone over the whole list */
      /* twice without a new one appearing. thread_create() does not   */
      /* return (and thus the creating thread cannot stop) until the   */
      /* new thread exists, so there is no window in which you could   */
      /* stop a thread, recognize that it is stopped, but then later   */
      /* have a new thread it created before stopping show up.         */
      changed = TRUE;
      prev_list = NULL;
      prevcount = 0;
      do {
        kern_result = task_threads(my_task, &act_list, &listcount);

        if (kern_result == KERN_SUCCESS) {
          changed = GC_suspend_thread_list(act_list, listcount, prev_list,
                                           prevcount, my_thread);

          if (prev_list != NULL) {
            for (i = 0; i < prevcount; i++)
              mach_port_deallocate(my_task, prev_list[i]);

            vm_deallocate(my_task, (vm_address_t)prev_list,
                          sizeof(thread_t) * prevcount);
          }

          /* Repeat while having changes. */
          prev_list = act_list;
          prevcount = listcount;
        }
      } while (changed);

      GC_ASSERT(prev_list != 0);
      for (i = 0; i < prevcount; i++)
        mach_port_deallocate(my_task, prev_list[i]);
      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
#   endif /* !GC_NO_THREADS_DISCOVERY */

  } else {
    unsigned i;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->next) {
        if ((p->flags & FINISHED) == 0 && !p->thread_blocked &&
            p->stop_info.mach_thread != my_thread) {
          GC_acquire_dirty_lock();
          do {
            kern_result = thread_suspend(p->stop_info.mach_thread);
          } while (kern_result == KERN_ABORTED);
          GC_release_dirty_lock();
          if (kern_result != KERN_SUCCESS)
            ABORT("thread_suspend failed");
          if (GC_on_thread_event)
            GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
                               (void *)(word)p->stop_info.mach_thread);
        }
      }
    }
  }

# ifdef MPROTECT_VDB
    if (GC_incremental) {
      GC_mprotect_stop();
    }
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_release_mark_lock();
# endif

# ifdef DEBUG_THREADS
    GC_log_printf("World stopped from %p\n", (void *)(word)my_thread);
# endif
  mach_port_deallocate(my_task, my_thread);
}
GC_INLINE void GC_thread_resume(thread_act_t thread)
{
  kern_return_t kern_result;
# if defined(DEBUG_THREADS) || defined(GC_ASSERTIONS)
    struct thread_basic_info info;
    mach_msg_type_number_t outCount = THREAD_INFO_MAX;

    kern_result = thread_info(thread, THREAD_BASIC_INFO,
                              (thread_info_t)&info, &outCount);
    if (kern_result != KERN_SUCCESS)
      ABORT("thread_info failed");
# endif
# ifdef DEBUG_THREADS
    GC_log_printf("Resuming thread %p with state %d\n", (void *)(word)thread,
                  info.run_state);
# endif
  /* Resume the thread */
  kern_result = thread_resume(thread);
  if (kern_result != KERN_SUCCESS)
    ABORT("thread_resume failed");
  if (GC_on_thread_event)
    GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, (void *)(word)thread);
}
/* Caller holds allocation lock, and has held it continuously since */
/* the world stopped.                                                */
GC_INNER void GC_start_world(void)
{
  task_t my_task = current_task();

# ifdef DEBUG_THREADS
    GC_log_printf("World starting\n");
# endif
# ifdef MPROTECT_VDB
    if (GC_incremental) {
      GC_mprotect_resume();
    }
# endif

  if (GC_query_task_threads) {
#   ifndef GC_NO_THREADS_DISCOVERY
      int i;
      int j = GC_mach_threads_count;
      kern_return_t kern_result;
      thread_act_array_t act_list;
      mach_msg_type_number_t listcount;

      kern_result = task_threads(my_task, &act_list, &listcount);
      if (kern_result != KERN_SUCCESS)
        ABORT("task_threads failed");

      for (i = 0; i < (int)listcount; i++) {
        thread_act_t thread = act_list[i];
        int last_found = j;     /* The thread index found during the */
                                /* previous iteration (count value   */
                                /* means no thread found yet).       */

        /* Search for the thread starting from the last found one first. */
        while (++j < GC_mach_threads_count) {
          if (GC_mach_threads[j].thread == thread)
            break;
        }
        if (j >= GC_mach_threads_count) {
          /* If not found, search in the rest (beginning) of the list. */
          for (j = 0; j < last_found; j++) {
            if (GC_mach_threads[j].thread == thread)
              break;
          }
        }

        if (j != last_found) {
          /* The thread is found in GC_mach_threads. */
          if (GC_mach_threads[j].already_suspended) {
#           ifdef DEBUG_THREADS
              GC_log_printf("Not resuming already suspended thread %p\n",
                            (void *)(word)thread);
#           endif
          } else {
            GC_thread_resume(thread);
          }
        }

        mach_port_deallocate(my_task, thread);
      }
      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
#   endif /* !GC_NO_THREADS_DISCOVERY */

  } else {
    int i;
    mach_port_t my_thread = mach_thread_self();

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->next) {
        if ((p->flags & FINISHED) == 0 && !p->thread_blocked &&
            p->stop_info.mach_thread != my_thread)
          GC_thread_resume(p->stop_info.mach_thread);
      }
    }

    mach_port_deallocate(my_task, my_thread);
  }

# ifdef DEBUG_THREADS
    GC_log_printf("World started\n");
# endif
}

#endif /* GC_DARWIN_THREADS */