/* gc_pmark.h */
/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* Private declarations of GC marker data structures and macros.        */

/*
 * Declarations of mark stack.  Needed by marker and client supplied mark
 * routines.  Transitively include gc_priv.h.
 */
  20. #ifndef GC_PMARK_H
  21. #define GC_PMARK_H
  22. #if defined(HAVE_CONFIG_H) && !defined(GC_PRIVATE_H)
  23. # include "config.h"
  24. #endif
  25. #ifndef GC_BUILD
  26. # define GC_BUILD
  27. #endif
  28. #if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__)) \
  29. && !defined(_GNU_SOURCE) && defined(GC_PTHREADS) \
  30. && !defined(GC_NO_PTHREAD_SIGMASK)
  31. # define _GNU_SOURCE 1
  32. #endif
  33. #if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST)
  34. # include "dbg_mlc.h"
  35. #endif
  36. #ifndef GC_MARK_H
  37. # include "../gc_mark.h"
  38. #endif
  39. #ifndef GC_PRIVATE_H
  40. # include "gc_priv.h"
  41. #endif
  42. EXTERN_C_BEGIN
  43. /* The real declarations of the following is in gc_priv.h, so that */
  44. /* we can avoid scanning the following table. */
  45. /*
  46. mark_proc GC_mark_procs[MAX_MARK_PROCS];
  47. */
  48. #ifndef MARK_DESCR_OFFSET
  49. # define MARK_DESCR_OFFSET sizeof(word)
  50. #endif
  51. /*
  52. * Mark descriptor stuff that should remain private for now, mostly
  53. * because it's hard to export WORDSZ without including gcconfig.h.
  54. */
  55. #define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS)
  56. #define PROC(descr) \
  57. (GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
  58. #define ENV(descr) \
  59. ((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
  60. #define MAX_ENV \
  61. (((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
  62. GC_EXTERN unsigned GC_n_mark_procs;
  63. /* Number of mark stack entries to discard on overflow. */
  64. #define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
  65. GC_EXTERN size_t GC_mark_stack_size;
  66. #ifdef PARALLEL_MARK
  67. /*
  68. * Allow multiple threads to participate in the marking process.
  69. * This works roughly as follows:
  70. * The main mark stack never shrinks, but it can grow.
  71. *
  72. * The initiating threads holds the GC lock, and sets GC_help_wanted.
  73. *
  74. * Other threads:
  75. * 1) update helper_count (while holding mark_lock.)
  76. * 2) allocate a local mark stack
  77. * repeatedly:
  78. * 3) Steal a global mark stack entry by atomically replacing
  79. * its descriptor with 0.
  80. * 4) Copy it to the local stack.
  81. * 5) Mark on the local stack until it is empty, or
  82. * it may be profitable to copy it back.
  83. * 6) If necessary, copy local stack to global one,
  84. * holding mark lock.
  85. * 7) Stop when the global mark stack is empty.
  86. * 8) decrement helper_count (holding mark_lock).
  87. *
  88. * This is an experiment to see if we can do something along the lines
  89. * of the University of Tokyo SGC in a less intrusive, though probably
  90. * also less performant, way.
  91. */
  92. /* GC_mark_stack_top is protected by mark lock. */
  93. /*
  94. * GC_notify_all_marker() is used when GC_help_wanted is first set,
  95. * when the last helper becomes inactive,
  96. * when something is added to the global mark stack, and just after
  97. * GC_mark_no is incremented.
  98. * This could be split into multiple CVs (and probably should be to
  99. * scale to really large numbers of processors.)
  100. */
  101. #endif /* PARALLEL_MARK */
  102. GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
  103. /* Push the object obj with corresponding heap block header hhdr onto */
  104. /* the mark stack. */
  105. #define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
  106. do { \
  107. word _descr = (hhdr) -> hb_descr; \
  108. GC_ASSERT(!HBLK_IS_FREE(hhdr)); \
  109. if (_descr != 0) { \
  110. mark_stack_top++; \
  111. if ((word)mark_stack_top >= (word)(mark_stack_limit)) { \
  112. mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
  113. } \
  114. mark_stack_top -> mse_start = (obj); \
  115. mark_stack_top -> mse_descr.w = _descr; \
  116. } \
  117. } while (0)
  118. /* Push the contents of current onto the mark stack if it is a valid */
  119. /* ptr to a currently unmarked object. Mark it. */
  120. #define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, source) \
  121. do { \
  122. hdr * my_hhdr; \
  123. HC_GET_HDR(current, my_hhdr, source); /* contains "break" */ \
  124. PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
  125. source, my_hhdr, TRUE); \
  126. } while (0)
  127. /* Set mark bit, exit (using "break" statement) if it is already set. */
  128. #ifdef USE_MARK_BYTES
  129. # if defined(PARALLEL_MARK) && defined(AO_HAVE_char_store) \
  130. && !defined(AO_USE_PTHREAD_DEFS)
  131. /* There is a race here, and we may set the bit twice in the */
  132. /* concurrent case. This can result in the object being pushed */
  133. /* twice. But that is only a performance issue. */
  134. # define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
  135. { /* cannot use do-while(0) here */ \
  136. volatile unsigned char * mark_byte_addr = \
  137. (unsigned char *)(hhdr)->hb_marks + (bit_no); \
  138. /* Unordered atomic load and store are sufficient here. */ \
  139. if (AO_char_load(mark_byte_addr) != 0) \
  140. break; /* go to the enclosing loop end */ \
  141. AO_char_store(mark_byte_addr, 1); \
  142. }
  143. # else
  144. # define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
  145. { /* cannot use do-while(0) here */ \
  146. char * mark_byte_addr = (char *)(hhdr)->hb_marks + (bit_no); \
  147. if (*mark_byte_addr != 0) break; /* go to the enclosing loop end */ \
  148. *mark_byte_addr = 1; \
  149. }
  150. # endif /* !PARALLEL_MARK */
  151. #else
  152. # ifdef PARALLEL_MARK
  153. /* This is used only if we explicitly set USE_MARK_BITS. */
  154. /* The following may fail to exit even if the bit was already set. */
  155. /* For our uses, that's benign: */
  156. # ifdef THREAD_SANITIZER
  157. # define OR_WORD_EXIT_IF_SET(addr, bits) \
  158. { /* cannot use do-while(0) here */ \
  159. if (!((word)AO_load((volatile AO_t *)(addr)) & (bits))) { \
  160. /* Atomic load is just to avoid TSan false positive. */ \
  161. AO_or((volatile AO_t *)(addr), (AO_t)(bits)); \
  162. } else { \
  163. break; /* go to the enclosing loop end */ \
  164. } \
  165. }
  166. # else
  167. # define OR_WORD_EXIT_IF_SET(addr, bits) \
  168. { /* cannot use do-while(0) here */ \
  169. if (!(*(addr) & (bits))) { \
  170. AO_or((volatile AO_t *)(addr), (AO_t)(bits)); \
  171. } else { \
  172. break; /* go to the enclosing loop end */ \
  173. } \
  174. }
  175. # endif /* !THREAD_SANITIZER */
  176. # else
  177. # define OR_WORD_EXIT_IF_SET(addr, bits) \
  178. { /* cannot use do-while(0) here */ \
  179. word old = *(addr); \
  180. word my_bits = (bits); \
  181. if ((old & my_bits) != 0) \
  182. break; /* go to the enclosing loop end */ \
  183. *(addr) = old | my_bits; \
  184. }
  185. # endif /* !PARALLEL_MARK */
  186. # define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
  187. { /* cannot use do-while(0) here */ \
  188. word * mark_word_addr = (hhdr)->hb_marks + divWORDSZ(bit_no); \
  189. OR_WORD_EXIT_IF_SET(mark_word_addr, \
  190. (word)1 << modWORDSZ(bit_no)); /* contains "break" */ \
  191. }
  192. #endif /* !USE_MARK_BYTES */
  193. #ifdef PARALLEL_MARK
  194. # define INCR_MARKS(hhdr) \
  195. AO_store(&hhdr->hb_n_marks, AO_load(&hhdr->hb_n_marks) + 1)
  196. #else
  197. # define INCR_MARKS(hhdr) (void)(++hhdr->hb_n_marks)
  198. #endif
  199. #ifdef ENABLE_TRACE
  200. # define TRACE(source, cmd) \
  201. if (GC_trace_addr != 0 && (ptr_t)(source) == GC_trace_addr) cmd
  202. # define TRACE_TARGET(target, cmd) \
  203. if (GC_trace_addr != 0 && (target) == *(ptr_t *)GC_trace_addr) cmd
  204. #else
  205. # define TRACE(source, cmd)
  206. # define TRACE_TARGET(source, cmd)
  207. #endif
  208. #if defined(I386) && defined(__GNUC__) && !defined(NACL)
  209. # define LONG_MULT(hprod, lprod, x, y) \
  210. do { \
  211. __asm__ __volatile__("mull %2" : "=a"(lprod), "=d"(hprod) \
  212. : "g"(y), "0"(x)); \
  213. } while (0)
  214. #else
  215. # if defined(__int64) && !defined(__GNUC__) && !defined(CPPCHECK)
  216. # define ULONG_MULT_T unsigned __int64
  217. # else
  218. # define ULONG_MULT_T unsigned long long
  219. # endif
  220. # define LONG_MULT(hprod, lprod, x, y) \
  221. do { \
  222. ULONG_MULT_T prod = (ULONG_MULT_T)(x) * (ULONG_MULT_T)(y); \
  223. GC_STATIC_ASSERT(sizeof(x) + sizeof(y) <= sizeof(prod)); \
  224. hprod = prod >> 32; \
  225. lprod = (unsigned32)prod; \
  226. } while (0)
  227. #endif /* !I386 */
  228. /* If the mark bit corresponding to current is not set, set it, and */
  229. /* push the contents of the object on the mark stack. Current points */
  230. /* to the beginning of the object. We rely on the fact that the */
  231. /* preceding header calculation will succeed for a pointer past the */
  232. /* first page of an object, only if it is in fact a valid pointer */
  233. /* to the object. Thus we can omit the otherwise necessary tests */
  234. /* here. Note in particular that the "displ" value is the displacement */
  235. /* from the beginning of the heap block, which may itself be in the */
  236. /* interior of a large object. */
  237. #ifdef MARK_BIT_PER_GRANULE
  238. # define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
  239. source, hhdr, do_offset_check) \
  240. do { \
  241. size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
  242. /* displ is always within range. If current doesn't point to */ \
  243. /* first block, then we are in the all_interior_pointers case, and */ \
  244. /* it is safe to use any displacement value. */ \
  245. size_t gran_displ = BYTES_TO_GRANULES(displ); \
  246. size_t gran_offset = hhdr -> hb_map[gran_displ]; \
  247. size_t byte_offset = displ & (GRANULE_BYTES - 1); \
  248. ptr_t base = (ptr_t)(current); \
  249. /* The following always fails for large block references. */ \
  250. if (EXPECT((gran_offset | byte_offset) != 0, FALSE)) { \
  251. if ((hhdr -> hb_flags & LARGE_BLOCK) != 0) { \
  252. /* gran_offset is bogus. */ \
  253. size_t obj_displ; \
  254. base = (ptr_t)(hhdr -> hb_block); \
  255. obj_displ = (ptr_t)(current) - base; \
  256. if (obj_displ != displ) { \
  257. GC_ASSERT(obj_displ < hhdr -> hb_sz); \
  258. /* Must be in all_interior_pointer case, not first block */ \
  259. /* already did validity check on cache miss. */ \
  260. } else { \
  261. if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
  262. GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
  263. break; /* go to the end of PUSH_CONTENTS_HDR */ \
  264. } \
  265. } \
  266. gran_displ = 0; \
  267. GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
  268. hhdr -> hb_block == HBLKPTR(current)); \
  269. GC_ASSERT((word)hhdr->hb_block <= (word)(current)); \
  270. } else { \
  271. size_t obj_displ = GRANULES_TO_BYTES(gran_offset) \
  272. + byte_offset; \
  273. if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
  274. GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
  275. break; \
  276. } \
  277. gran_displ -= gran_offset; \
  278. base -= obj_displ; \
  279. } \
  280. } \
  281. GC_ASSERT(hhdr == GC_find_header(base)); \
  282. GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr -> hb_sz) == 0); \
  283. TRACE(source, GC_log_printf("GC #%u: passed validity tests\n", \
  284. (unsigned)GC_gc_no)); \
  285. SET_MARK_BIT_EXIT_IF_SET(hhdr, gran_displ); \
  286. TRACE(source, GC_log_printf("GC #%u: previously unmarked\n", \
  287. (unsigned)GC_gc_no)); \
  288. TRACE_TARGET(base, \
  289. GC_log_printf("GC #%u: marking %p from %p instead\n", \
  290. (unsigned)GC_gc_no, (void *)base, (void *)(source))); \
  291. INCR_MARKS(hhdr); \
  292. GC_STORE_BACK_PTR((ptr_t)(source), base); \
  293. PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
  294. } while (0)
  295. #endif /* MARK_BIT_PER_GRANULE */
  296. #ifdef MARK_BIT_PER_OBJ
  297. # define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
  298. source, hhdr, do_offset_check) \
  299. do { \
  300. size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
  301. unsigned32 low_prod, high_prod; \
  302. unsigned32 inv_sz = hhdr -> hb_inv_sz; \
  303. ptr_t base = (ptr_t)(current); \
  304. LONG_MULT(high_prod, low_prod, (unsigned32)displ, inv_sz); \
  305. /* product is > and within sz_in_bytes of displ * sz_in_bytes * 2**32 */ \
  306. if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
  307. /* FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 */ \
  308. if (inv_sz == LARGE_INV_SZ) { \
  309. size_t obj_displ; \
  310. base = (ptr_t)(hhdr -> hb_block); \
  311. obj_displ = (ptr_t)(current) - base; \
  312. if (obj_displ != displ) { \
  313. GC_ASSERT(obj_displ < hhdr -> hb_sz); \
  314. /* Must be in all_interior_pointer case, not first block */ \
  315. /* already did validity check on cache miss. */ \
  316. } else { \
  317. if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
  318. GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
  319. break; /* go to the end of PUSH_CONTENTS_HDR */ \
  320. } \
  321. } \
  322. GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
  323. hhdr -> hb_block == HBLKPTR(current)); \
  324. GC_ASSERT((word)hhdr->hb_block < (word)(current)); \
  325. } else { \
  326. size_t obj_displ; \
  327. /* Accurate enough if HBLKSIZE <= 2**15. */ \
  328. GC_STATIC_ASSERT(HBLKSIZE <= (1 << 15)); \
  329. obj_displ = (((low_prod >> 16) + 1) * (size_t)hhdr->hb_sz) >> 16; \
  330. if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
  331. GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
  332. break; \
  333. } \
  334. base -= obj_displ; \
  335. } \
  336. } \
  337. /* May get here for pointer to start of block not at */ \
  338. /* beginning of object. If so, it's valid, and we're fine. */ \
  339. GC_ASSERT(high_prod <= HBLK_OBJS(hhdr -> hb_sz)); \
  340. TRACE(source, GC_log_printf("GC #%u: passed validity tests\n", \
  341. (unsigned)GC_gc_no)); \
  342. SET_MARK_BIT_EXIT_IF_SET(hhdr, high_prod); \
  343. TRACE(source, GC_log_printf("GC #%u: previously unmarked\n", \
  344. (unsigned)GC_gc_no)); \
  345. TRACE_TARGET(base, \
  346. GC_log_printf("GC #%u: marking %p from %p instead\n", \
  347. (unsigned)GC_gc_no, (void *)base, (void *)(source))); \
  348. INCR_MARKS(hhdr); \
  349. GC_STORE_BACK_PTR((ptr_t)(source), base); \
  350. PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
  351. } while (0)
  352. #endif /* MARK_BIT_PER_OBJ */
  353. #if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  354. # define PUSH_ONE_CHECKED_STACK(p, source) \
  355. GC_mark_and_push_stack((ptr_t)(p), (ptr_t)(source))
  356. #else
  357. # define PUSH_ONE_CHECKED_STACK(p, source) \
  358. GC_mark_and_push_stack((ptr_t)(p))
  359. #endif
  360. /*
  361. * Push a single value onto mark stack. Mark from the object pointed to by p.
  362. * Invoke FIXUP_POINTER(p) before any further processing.
  363. * P is considered valid even if it is an interior pointer.
  364. * Previously marked objects are not pushed. Hence we make progress even
  365. * if the mark stack overflows.
  366. */
  367. #ifdef NEED_FIXUP_POINTER
  368. /* Try both the raw version and the fixed up one. */
  369. # define GC_PUSH_ONE_STACK(p, source) \
  370. do { \
  371. if ((word)(p) >= (word)GC_least_plausible_heap_addr \
  372. && (word)(p) < (word)GC_greatest_plausible_heap_addr) { \
  373. PUSH_ONE_CHECKED_STACK(p, source); \
  374. } \
  375. FIXUP_POINTER(p); \
  376. if ((word)(p) >= (word)GC_least_plausible_heap_addr \
  377. && (word)(p) < (word)GC_greatest_plausible_heap_addr) { \
  378. PUSH_ONE_CHECKED_STACK(p, source); \
  379. } \
  380. } while (0)
  381. #else /* !NEED_FIXUP_POINTER */
  382. # define GC_PUSH_ONE_STACK(p, source) \
  383. do { \
  384. if ((word)(p) >= (word)GC_least_plausible_heap_addr \
  385. && (word)(p) < (word)GC_greatest_plausible_heap_addr) { \
  386. PUSH_ONE_CHECKED_STACK(p, source); \
  387. } \
  388. } while (0)
  389. #endif
  390. /* As above, but interior pointer recognition as for normal heap pointers. */
  391. #define GC_PUSH_ONE_HEAP(p,source,mark_stack_top) \
  392. do { \
  393. FIXUP_POINTER(p); \
  394. if ((word)(p) >= (word)GC_least_plausible_heap_addr \
  395. && (word)(p) < (word)GC_greatest_plausible_heap_addr) \
  396. mark_stack_top = GC_mark_and_push((void *)(p), mark_stack_top, \
  397. GC_mark_stack_limit, (void * *)(source)); \
  398. } while (0)
  399. /* Mark starting at mark stack entry top (incl.) down to */
  400. /* mark stack entry bottom (incl.). Stop after performing */
  401. /* about one page worth of work. Return the new mark stack */
  402. /* top entry. */
  403. GC_INNER mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
  404. #define MARK_FROM_MARK_STACK() \
  405. GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
  406. GC_mark_stack, \
  407. GC_mark_stack + GC_mark_stack_size);
  408. #define GC_mark_stack_empty() ((word)GC_mark_stack_top < (word)GC_mark_stack)
  409. /*
  410. * Mark from one finalizable object using the specified
  411. * mark proc. May not mark the object pointed to by
  412. * real_ptr. That is the job of the caller, if appropriate.
  413. * Note that this is called with the mutator running, but
  414. * with us holding the allocation lock. This is safe only if the
  415. * mutator needs the allocation lock to reveal hidden pointers.
  416. * FIXME: Why do we need the GC_mark_state test below?
  417. */
  418. #define GC_MARK_FO(real_ptr, mark_proc) \
  419. do { \
  420. (*(mark_proc))(real_ptr); \
  421. while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK(); \
  422. if (GC_mark_state != MS_NONE) { \
  423. GC_set_mark_bit(real_ptr); \
  424. while (!GC_mark_some((ptr_t)0)) { /* empty */ } \
  425. } \
  426. } while (0)
  427. GC_EXTERN GC_bool GC_mark_stack_too_small;
  428. /* We need a larger mark stack. May be */
  429. /* set by client supplied mark routines.*/
  430. typedef int mark_state_t; /* Current state of marking, as follows:*/
  431. /* Used to remember where we are during */
  432. /* concurrent marking. */
  433. /* We say something is dirty if it was */
  434. /* written since the last time we */
  435. /* retrieved dirty bits. We say it's */
  436. /* grungy if it was marked dirty in the */
  437. /* last set of bits we retrieved. */
  438. /* Invariant "I": all roots and marked */
  439. /* objects p are either dirty, or point */
  440. /* to objects q that are either marked */
  441. /* or a pointer to q appears in a range */
  442. /* on the mark stack. */
  443. #define MS_NONE 0 /* No marking in progress. "I" holds. */
  444. /* Mark stack is empty. */
  445. #define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
  446. /* being pushed. "I" holds, except */
  447. /* that grungy roots may point to */
  448. /* unmarked objects, as may marked */
  449. /* grungy objects above scan_ptr. */
  450. #define MS_PUSH_UNCOLLECTABLE 2 /* "I" holds, except that marked */
  451. /* uncollectible objects above scan_ptr */
  452. /* may point to unmarked objects. */
  453. /* Roots may point to unmarked objects */
  454. #define MS_ROOTS_PUSHED 3 /* "I" holds, mark stack may be nonempty. */
  455. #define MS_PARTIALLY_INVALID 4 /* "I" may not hold, e.g. because of */
  456. /* the mark stack overflow. However */
  457. /* marked heap objects below scan_ptr */
  458. /* point to marked or stacked objects. */
  459. #define MS_INVALID 5 /* "I" may not hold. */
  460. GC_EXTERN mark_state_t GC_mark_state;
  461. EXTERN_C_END
  462. #endif /* GC_PMARK_H */