mark_rts.c

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <stdio.h>

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
    ptr_t r_start;
    ptr_t r_end;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      struct roots * r_next;
#   endif
    GC_bool r_tmp;
        -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* Register dynamic library data segments.      */

static int n_root_sets = 0;
        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */

#if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
  /* Should return the same value as GC_root_size.      */
  GC_INNER word GC_compute_root_size(void)
  {
    int i;
    word size = 0;

    for (i = 0; i < n_root_sets; i++) {
      size += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    return size;
  }
#endif /* !NO_DEBUGGING || GC_ASSERTIONS */

#if !defined(NO_DEBUGGING)
  /* For debugging:     */
  void GC_print_static_roots(void)
  {
    int i;
    word size;

    for (i = 0; i < n_root_sets; i++) {
      GC_printf("From %p to %p%s\n",
                (void *)GC_static_roots[i].r_start,
                (void *)GC_static_roots[i].r_end,
                GC_static_roots[i].r_tmp ? " (temporary)" : "");
    }
    GC_printf("GC_root_size: %lu\n", (unsigned long)GC_root_size);

    if ((size = GC_compute_root_size()) != GC_root_size)
      GC_err_printf("GC_root_size incorrect!! Should be: %lu\n",
                    (unsigned long)size);
  }
#endif /* !NO_DEBUGGING */

#ifndef THREADS
  /* Primarily for debugging support:                                   */
  /* Is the address p in one of the registered static root sections?    */
  GC_INNER GC_bool GC_is_static_root(void *p)
  {
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && (word)p >= (word)GC_static_roots[last_root_set].r_start
        && (word)p < (word)GC_static_roots[last_root_set].r_end)
      return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
      if ((word)p >= (word)GC_static_roots[i].r_start
          && (word)p < (word)GC_static_roots[i].r_end) {
        last_root_set = i;
        return(TRUE);
      }
    }
    return(FALSE);
  }
#endif /* !THREADS */

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/
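
/* Hash an address down to LOG_RT_SIZE bits by successively XOR-folding */
/* the high-order bits onto the low-order ones, so that every bit of    */
/* the address can influence the resulting bucket index.                */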
GC_INLINE int rt_hash(ptr_t addr)
{
    word result = (word) addr;

#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return((int)result); /* fits in an int: at most LOG_RT_SIZE bits set */
}

/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NULL.                                            */
GC_INNER void * GC_roots_present(ptr_t b)
{
    int h = rt_hash(b);
    struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return NULL;
}

/* Add the given root structure to the index. */
GC_INLINE void add_roots_to_index(struct roots *p)
{
    int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}
#endif /* !MSWIN32 && !MSWINCE && !CYGWIN32 */

GC_INNER word GC_root_size = 0;

GC_API void GC_CALL GC_add_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
    UNLOCK();
}
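
/* Illustrative client-side usage (not part of this file; the names    */
/* are hypothetical): register a statically allocated table so that    */
/* the collector scans it for pointers:                                */
/*                                                                     */
/*      static void *my_refs[100];                                     */
/*      GC_add_roots(my_refs, my_refs + 100);   -- registers [b, e)    */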

/* Add [b,e) to the root set.  Adding the same interval a second time  */
/* is a moderately fast no-op, and hence benign.  We do not handle     */
/* different but overlapping intervals efficiently.  (We do handle     */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before               */
/* re-registering dynamic libraries.                                    */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    GC_ASSERT((word)b <= (word)e);
    b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1));
                                        /* round b up to word boundary */
    e = (ptr_t)((word)e & ~(word)(sizeof(word) - 1));
                                        /* round e down to word boundary */
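    /* E.g., with 8-byte words, b = 0x1003 rounds up to 0x1008 and      */
    /* e = 0x2005 rounds down to 0x2000, so only whole words lying      */
    /* entirely inside the original interval are registered.            */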
    if ((word)b >= (word)e) return; /* nothing to do */

#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        int i;
        struct roots * old = NULL; /* initialized to prevent warning. */

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((word)b <= (word)old->r_end
                && (word)e >= (word)old->r_start) {
                if ((word)b < (word)old->r_start) {
                    GC_root_size += old->r_start - b;
                    old -> r_start = b;
                }
                if ((word)e > (word)old->r_end) {
                    GC_root_size += e - old->r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
          struct roots *other;

          for (i++; i < n_root_sets; i++) {
            other = GC_static_roots + i;
            b = other -> r_start;
            e = other -> r_end;
            if ((word)b <= (word)old->r_end
                && (word)e >= (word)old->r_start) {
              if ((word)b < (word)old->r_start) {
                GC_root_size += old->r_start - b;
                old -> r_start = b;
              }
              if ((word)e > (word)old->r_end) {
                GC_root_size += e - old->r_end;
                old -> r_end = e;
              }
              old -> r_tmp &= other -> r_tmp;
              /* Delete this entry. */
              GC_root_size -= (other -> r_end - other -> r_start);
              other -> r_start = GC_static_roots[n_root_sets-1].r_start;
              other -> r_end = GC_static_roots[n_root_sets-1].r_end;
              n_root_sets--;
            }
          }
          return;
        }
      }
#   else
      {
        struct roots * old = (struct roots *)GC_roots_present(b);

        if (old != 0) {
          if ((word)e <= (word)old->r_end) {
            old -> r_tmp &= tmp;
            return; /* already there */
          }
          if (old -> r_tmp == tmp || !tmp) {
            /* Extend the existing root. */
            GC_root_size += e - old -> r_end;
            old -> r_end = e;
            old -> r_tmp = tmp;
            return;
          }
          b = old -> r_end;
        }
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets");
    }

#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Adding data root section %d: %p .. %p%s\n",
                    n_root_sets, (void *)b, (void *)e,
                    tmp ? " (temporary)" : "");
#   endif
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
      add_roots_to_index(GC_static_roots + n_root_sets);
#   endif
    GC_root_size += e - b;
    n_root_sets++;
}

static GC_bool roots_were_cleared = FALSE;

GC_API void GC_CALL GC_clear_roots(void)
{
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      BZERO(GC_root_index, RT_SIZE * sizeof(void *));
#   endif
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Clear all data root sections\n");
#   endif
    UNLOCK();
}
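
/* Deletion is O(1): the last root set is copied into the vacated slot */
/* and the count is decremented.  On platforms that keep the hash      */
/* index, this leaves stale r_next links behind, which is why callers  */
/* follow up with GC_rebuild_root_index().                             */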
/* Internal use only; lock held. */
STATIC void GC_remove_root_at_pos(int i)
{
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Remove data root section at %d: %p .. %p%s\n",
                    i, (void *)GC_static_roots[i].r_start,
                    (void *)GC_static_roots[i].r_end,
                    GC_static_roots[i].r_tmp ? " (temporary)" : "");
#   endif
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
  STATIC void GC_rebuild_root_index(void)
  {
    int i;

    BZERO(GC_root_index, RT_SIZE * sizeof(void *));
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
  }
#endif

#if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
    || defined(PCR) || defined(CYGWIN32)
  /* Internal use only; lock held. */
  STATIC void GC_remove_tmp_roots(void)
  {
    int i;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      int old_n_roots = n_root_sets;
#   endif

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      if (n_root_sets < old_n_roots)
        GC_rebuild_root_index();
#   endif
  }
#endif

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
  STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e);

  GC_API void GC_CALL GC_remove_roots(void *b, void *e)
  {
    DCL_LOCK_STATE;

    /* Quick check whether there is anything to do. */
    if ((((word)b + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1)) >=
        ((word)e & ~(word)(sizeof(word) - 1)))
      return;

    LOCK();
    GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
    UNLOCK();
  }
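
  /* Note: only root sets that lie entirely inside [b, e) are removed; */
  /* partially overlapping sets are left untouched (see the loop in    */
  /* GC_remove_roots_inner below).                                     */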
  /* Should only be called when the lock is held. */
  STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    for (i = 0; i < n_root_sets; ) {
        if ((word)GC_static_roots[i].r_start >= (word)b
            && (word)GC_static_roots[i].r_end <= (word)e) {
            GC_remove_root_at_pos(i);
            rebuild = TRUE;
        } else {
            i++;
        }
    }
    if (rebuild)
        GC_rebuild_root_index();
  }
#endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */

#ifdef USE_PROC_FOR_LIBRARIES
  /* Remove given range from every static root which intersects with   */
  /* the range.  It is assumed GC_remove_tmp_roots is called before    */
  /* this function is called repeatedly by GC_register_map_entries.    */
  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)b % sizeof(word) == 0 && (word)e % sizeof(word) == 0);
    for (i = 0; i < n_root_sets; i++) {
      ptr_t r_start, r_end;

      if (GC_static_roots[i].r_tmp) {
        /* The remaining roots are skipped as they are all temporary. */
#       ifdef GC_ASSERTIONS
          int j;
          for (j = i + 1; j < n_root_sets; j++) {
            GC_ASSERT(GC_static_roots[j].r_tmp);
          }
#       endif
        break;
      }
      r_start = GC_static_roots[i].r_start;
      r_end = GC_static_roots[i].r_end;
      if (!EXPECT((word)e <= (word)r_start || (word)r_end <= (word)b, TRUE)) {
#       ifdef DEBUG_ADD_DEL_ROOTS
          GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n",
                        (void *)b, (void *)e,
                        i, (void *)r_start, (void *)r_end);
#       endif
        if ((word)r_start < (word)b) {
          GC_root_size -= r_end - b;
          GC_static_roots[i].r_end = b;
          /* No need to rebuild as hash does not use r_end value. */
          if ((word)e < (word)r_end) {
            int j;

            if (rebuild) {
              GC_rebuild_root_index();
              rebuild = FALSE;
            }
            GC_add_roots_inner(e, r_end, FALSE); /* updates n_root_sets */
            for (j = i + 1; j < n_root_sets; j++)
              if (GC_static_roots[j].r_tmp)
                break;
            if (j < n_root_sets-1 && !GC_static_roots[n_root_sets-1].r_tmp) {
              /* Exchange the roots to have all temporary ones at the end. */
              ptr_t tmp_r_start = GC_static_roots[j].r_start;
              ptr_t tmp_r_end = GC_static_roots[j].r_end;

              GC_static_roots[j].r_start =
                                GC_static_roots[n_root_sets-1].r_start;
              GC_static_roots[j].r_end = GC_static_roots[n_root_sets-1].r_end;
              GC_static_roots[j].r_tmp = FALSE;
              GC_static_roots[n_root_sets-1].r_start = tmp_r_start;
              GC_static_roots[n_root_sets-1].r_end = tmp_r_end;
              GC_static_roots[n_root_sets-1].r_tmp = TRUE;
              rebuild = TRUE;
            }
          }
        } else {
          if ((word)e < (word)r_end) {
            GC_root_size -= e - r_start;
            GC_static_roots[i].r_start = e;
          } else {
            GC_remove_root_at_pos(i);
            i--;
          }
          rebuild = TRUE;
        }
      }
    }
    if (rebuild)
      GC_rebuild_root_index();
  }
#endif /* USE_PROC_FOR_LIBRARIES */

#if !defined(NO_DEBUGGING)
  /* For debugging purposes only.                                      */
  /* Workaround for the OS mapping and unmapping behind our back:      */
  /* Is the address p in one of the temporary static root sections?    */
  GC_API int GC_CALL GC_is_tmp_root(void *p)
  {
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && (word)p >= (word)GC_static_roots[last_root_set].r_start
        && (word)p < (word)GC_static_roots[last_root_set].r_end)
      return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
      if ((word)p >= (word)GC_static_roots[i].r_start
          && (word)p < (word)GC_static_roots[i].r_end) {
        last_root_set = i;
        return GC_static_roots[i].r_tmp;
      }
    }
    return(FALSE);
  }
#endif /* !NO_DEBUGGING */

GC_INNER ptr_t GC_approx_sp(void)
{
    volatile word sp;

#   if (defined(CPPCHECK) || (__GNUC__ >= 4)) \
       && !defined(STACK_NOT_SCANNED) /* GC_GNUC_PREREQ(4, 0) */
        /* TODO: Use GC_GNUC_PREREQ after fixing a bug in cppcheck. */
        sp = (word)__builtin_frame_address(0);
#   else
        sp = (word)&sp;
#   endif
        /* Also force stack to grow if necessary. Otherwise the */
        /* later accesses might cause the kernel to think we're */
        /* doing something wrong.                               */
    return((ptr_t)sp);
}

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

STATIC size_t GC_excl_table_entries = 0;/* Number of entries in use. */

GC_API void GC_CALL GC_clear_exclusion_table(void)
{
    GC_excl_table_entries = 0;
}

/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the     */
/* GC data structures).                                                     */
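/* Binary search: the loop maintains the invariant that the first      */
/* entry whose e_end exceeds start_addr (if any) lies in [low, high];  */
/* the final test rejects the case where start_addr is past them all.  */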
STATIC struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;

    while (high > low) {
        size_t mid = (low + high) >> 1;

        /* low <= mid < high */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}

/* Should only be called when the lock is held.  The range boundaries  */
/* should be properly aligned and valid.                               */
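/* The exclusion table is kept sorted by address: a new range is       */
/* either merged into the following one (when they abut) or inserted   */
/* in place, shifting the later entries up by one slot.                */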
GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
{
    struct exclusion * next;
    size_t next_index;

    GC_ASSERT((word)start % sizeof(word) == 0);
    GC_ASSERT((word)start < (word)finish);

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion((ptr_t)start);
    }
    if (0 != next) {
      if ((word)(next -> e_start) < (word) finish) {
        /* incomplete error check. */
        ABORT("Exclusion ranges overlap");
      }
      if ((word)(next -> e_start) == (word) finish) {
        /* extend old range backwards */
        next -> e_start = (ptr_t)start;
        return;
      }
      next_index = next - GC_excl_table;
    } else {
      next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
        /* Check before shifting: the insertion loop below writes one   */
        /* entry past the current end of the table.                     */
    {
      size_t i;

      for (i = GC_excl_table_entries; i > next_index; --i) {
        GC_excl_table[i] = GC_excl_table[i-1];
      }
    }
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}

GC_API void GC_CALL GC_exclude_static_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (b == e) return;  /* nothing to exclude? */

    /* Round boundaries (in direction reverse to that of GC_add_roots). */
    b = (void *)((word)b & ~(word)(sizeof(word) - 1));
    e = (void *)(((word)e + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1));
    if (NULL == e)
      e = (void *)(~(word)(sizeof(word) - 1)); /* handle overflow */

    LOCK();
    GC_exclude_static_roots_inner(b, e);
    UNLOCK();
}
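
/* Illustrative client-side usage (hypothetical names): prevent the    */
/* collector from scanning a large pointer-free static buffer that     */
/* would otherwise lie inside a registered root section:               */
/*                                                                     */
/*      static char io_buf[1 << 20];                                   */
/*      GC_exclude_static_roots(io_buf, io_buf + sizeof(io_buf));      */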

#if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
# define GC_PUSH_CONDITIONAL(b, t, all) \
                (GC_parallel \
                    ? GC_push_conditional_eager(b, t, all) \
                    : GC_push_conditional(b, t, all))
#elif defined(GC_DISABLE_INCREMENTAL)
# define GC_PUSH_CONDITIONAL(b, t, all) GC_push_all(b, t)
#else
# define GC_PUSH_CONDITIONAL(b, t, all) GC_push_conditional(b, t, all)
                        /* Do either of GC_push_all or GC_push_selected */
                        /* depending on the third arg.                  */
#endif

/* Invoke push_conditional on ranges that are not excluded. */
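/* The range [bottom, top) is walked left to right; each exclusion     */
/* that intersects it splits the range, and only the non-excluded      */
/* pieces are pushed.                                                  */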
STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
                                                GC_bool all GC_ATTR_UNUSED)
{
    while ((word)bottom < (word)top) {
        struct exclusion *next = GC_next_exclusion(bottom);
        ptr_t excl_start;

        if (0 == next
            || (word)(excl_start = next -> e_start) >= (word)top) {
          GC_PUSH_CONDITIONAL(bottom, top, all);
          break;
        }
        if ((word)excl_start > (word)bottom)
          GC_PUSH_CONDITIONAL(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}

#ifdef IA64
  /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
  GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
                  int eager, struct GC_traced_stack_sect_s *traced_stack_sect)
  {
    while (traced_stack_sect != NULL) {
        ptr_t frame_bs_lo = traced_stack_sect -> backing_store_end;

        GC_ASSERT((word)frame_bs_lo <= (word)bs_hi);
        if (eager) {
            GC_push_all_eager(frame_bs_lo, bs_hi);
        } else {
            GC_push_all_stack(frame_bs_lo, bs_hi);
        }
        bs_hi = traced_stack_sect -> saved_backing_store_ptr;
        traced_stack_sect = traced_stack_sect -> prev;
    }
    GC_ASSERT((word)bs_lo <= (word)bs_hi);
    if (eager) {
        GC_push_all_eager(bs_lo, bs_hi);
    } else {
        GC_push_all_stack(bs_lo, bs_hi);
    }
  }
#endif /* IA64 */

#ifdef THREADS
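
/* Walk the chain of "traced stack sections" from the hottest frame    */
/* outward: each section records where a nested collector activation   */
/* (e.g. via GC_call_with_gc_active) saved the client stack pointer,   */
/* so the stack is pushed in pieces.                                   */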
GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
                        struct GC_traced_stack_sect_s *traced_stack_sect)
{
    while (traced_stack_sect != NULL) {
        GC_ASSERT((word)lo HOTTER_THAN (word)traced_stack_sect);
#       ifdef STACK_GROWS_UP
            GC_push_all_stack((ptr_t)traced_stack_sect, lo);
#       else /* STACK_GROWS_DOWN */
            GC_push_all_stack(lo, (ptr_t)traced_stack_sect);
#       endif
        lo = traced_stack_sect -> saved_stack_ptr;
        GC_ASSERT(lo != NULL);
        traced_stack_sect = traced_stack_sect -> prev;
    }
    GC_ASSERT(!((word)hi HOTTER_THAN (word)lo));
#   ifdef STACK_GROWS_UP
        /* We got them backwards! */
        GC_push_all_stack(hi, lo);
#   else /* STACK_GROWS_DOWN */
        GC_push_all_stack(lo, hi);
#   endif
}

#else /* !THREADS */

/* Similar to GC_push_all_eager, but only the   */
/* part hotter than cold_gc_frame is scanned    */
/* immediately.  Needed to ensure that callee-  */
/* save registers are not missed.               */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 * We don't need to worry about the MANUAL_VDB case here, since this
 * is only called in the single-threaded case.  We assume that we
 * cannot collect between an assignment and the corresponding
 * GC_dirty() call.
 */
STATIC void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
                                              ptr_t cold_gc_frame)
{
#ifndef NEED_FIXUP_POINTER
  if (GC_all_interior_pointers) {
    /* Push the hot end of the stack eagerly, so that register values  */
    /* saved inside GC frames are marked before they disappear.        */
    /* The rest of the marking can be deferred until later.            */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
    GC_ASSERT((word)bottom <= (word)cold_gc_frame
              && (word)cold_gc_frame <= (word)top);
#   ifdef STACK_GROWS_DOWN
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
        GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
        GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else
#endif
  /* else */ {
    GC_push_all_eager(bottom, top);
  }
#   ifdef TRACE_BUF
      GC_add_trace_entry("GC_push_all_stack", (word)bottom, (word)top);
#   endif
}

/* Similar to GC_push_all_stack_sections() but also uses cold_gc_frame. */
STATIC void GC_push_all_stack_part_eager_sections(ptr_t lo, ptr_t hi,
        ptr_t cold_gc_frame, struct GC_traced_stack_sect_s *traced_stack_sect)
{
    GC_ASSERT(traced_stack_sect == NULL || cold_gc_frame == NULL ||
              (word)cold_gc_frame HOTTER_THAN (word)traced_stack_sect);

    while (traced_stack_sect != NULL) {
        GC_ASSERT((word)lo HOTTER_THAN (word)traced_stack_sect);
#       ifdef STACK_GROWS_UP
            GC_push_all_stack_partially_eager((ptr_t)traced_stack_sect, lo,
                                              cold_gc_frame);
#       else /* STACK_GROWS_DOWN */
            GC_push_all_stack_partially_eager(lo, (ptr_t)traced_stack_sect,
                                              cold_gc_frame);
#       endif
        lo = traced_stack_sect -> saved_stack_ptr;
        GC_ASSERT(lo != NULL);
        traced_stack_sect = traced_stack_sect -> prev;
        cold_gc_frame = NULL; /* Use at most once.      */
    }

    GC_ASSERT(!((word)hi HOTTER_THAN (word)lo));
#   ifdef STACK_GROWS_UP
        /* We got them backwards! */
        GC_push_all_stack_partially_eager(hi, lo, cold_gc_frame);
#   else /* STACK_GROWS_DOWN */
        GC_push_all_stack_partially_eager(lo, hi, cold_gc_frame);
#   endif
}

#endif /* !THREADS */

                        /* Push enough of the current stack eagerly to */
                        /* ensure that callee-save registers saved in  */
                        /* GC frames are scanned.                      */
                        /* In the non-threads case, schedule entire    */
                        /* stack for scanning.                         */
                        /* The second argument is a pointer to the     */
                        /* (possibly null) thread context, for         */
                        /* (currently hypothetical) more precise       */
                        /* stack scanning.                             */
/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 * FIXME: Merge with per-thread stuff.
 */
STATIC void GC_push_current_stack(ptr_t cold_gc_frame,
                                  void * context GC_ATTR_UNUSED)
{
#   if defined(THREADS)
        if (0 == cold_gc_frame) return;
#       ifdef STACK_GROWS_DOWN
          GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
          /* For IA64, the register stack backing store is handled     */
          /* in the thread-specific code.                              */
#       else
          GC_push_all_eager(cold_gc_frame, GC_approx_sp());
#       endif
#   else
        GC_push_all_stack_part_eager_sections(GC_approx_sp(), GC_stackbottom,
                                        cold_gc_frame, GC_traced_stack_sect);
#       ifdef IA64
          /* We also need to push the register stack backing store.    */
          /* This should really be done in the same way as the         */
          /* regular stack.  For now we fudge it a bit.                */
          /* Note that the backing store grows up, so we can't use     */
          /* GC_push_all_stack_partially_eager.                        */
          {
            ptr_t bsp = GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer = bsp - 2048;

            if (GC_all_interior_pointers
                && (word)cold_gc_bs_pointer > (word)BACKING_STORE_BASE) {
              /* Adjust cold_gc_bs_pointer if below our innermost      */
              /* "traced stack section" in backing store.              */
              if (GC_traced_stack_sect != NULL
                  && (word)cold_gc_bs_pointer
                     < (word)GC_traced_stack_sect->backing_store_end)
                cold_gc_bs_pointer =
                        GC_traced_stack_sect->backing_store_end;
              GC_push_all_register_sections(BACKING_STORE_BASE,
                        cold_gc_bs_pointer, FALSE, GC_traced_stack_sect);
              GC_push_all_eager(cold_gc_bs_pointer, bsp);
            } else {
              GC_push_all_register_sections(BACKING_STORE_BASE, bsp,
                        TRUE /* eager */, GC_traced_stack_sect);
            }
            /* All values should be sufficiently aligned that we       */
            /* don't have to worry about the boundary.                 */
          }
#       endif
#   endif /* !THREADS */
}

GC_INNER void (*GC_push_typed_structures)(void) = 0;

                        /* Push GC internal roots.  These are normally */
                        /* included in the static data segment, and    */
                        /* thus implicitly pushed.  But we must do     */
                        /* this explicitly if normal root processing   */
                        /* is disabled.                                */
/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
STATIC void GC_push_gc_structures(void)
{
#   ifndef GC_NO_FINALIZATION
      GC_push_finalizer_structures();
#   endif
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
    if (GC_push_typed_structures)
        GC_push_typed_structures();
}

GC_INNER void GC_cond_register_dynamic_libraries(void)
{
#   if (defined(DYNAMIC_LOADING) && !defined(MSWIN_XBOX1)) \
       || defined(CYGWIN32) || defined(MSWIN32) || defined(MSWINCE) \
       || defined(PCR)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}

STATIC void GC_push_regs_and_stack(ptr_t cold_gc_frame)
{
    GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
}

/*
 * Call the mark routines (GC_push_one for a single pointer,
 * GC_push_conditional on groups of pointers) on every top level
 * accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame GC_ATTR_UNUSED)
{
    int i;
    unsigned kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Re-register dynamic libraries, in case one got added.           */
    /* There is some argument for doing this as late as possible,      */
    /* especially on win32, where it can change asynchronously.        */
    /* In those cases, we do it here.  But on other platforms, it's    */
    /* not safe with the world stopped, so we do it earlier.           */
#   if !defined(REGISTER_LIBRARIES_EARLY)
        GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas. */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
                             GC_static_roots[i].r_start,
                             GC_static_roots[i].r_end, all);
    }

    /* Mark all free list header blocks, if those were allocated from  */
    /* the garbage collected heap.  This makes sure they don't         */
    /* disappear if we are not marking from static data.  It also      */
    /* saves us the trouble of scanning them, and possibly that of     */
    /* marking the freelists.                                          */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
            GC_set_mark_bit(base);
        }
    }

    /* Mark from GC internal roots if those might otherwise have       */
    /* been excluded.                                                  */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }

    /* Mark thread local free lists, even if their mark                */
    /* descriptor excludes the link field.                             */
    /* If the world is not stopped, this is unsafe.  It is             */
    /* also unnecessary, since we will do this again with the          */
    /* world stopped.                                                  */
#   if defined(THREAD_LOCAL_ALLOC)
        if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
#   ifndef STACK_NOT_SCANNED
        GC_push_regs_and_stack(cold_gc_frame);
#   endif

    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}