reclaim.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
  5. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
  6. *
  7. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  8. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  9. *
  10. * Permission is hereby granted to use or copy this program
  11. * for any purpose, provided the above notices are retained on all copies.
  12. * Permission to modify the code and to distribute modified code is granted,
  13. * provided the above notices are retained, and a notice that the code was
  14. * modified is included with the above copyright notice.
  15. */
  16. #include "private/gc_priv.h"
  17. #ifdef ENABLE_DISCLAIM
  18. # include "gc_disclaim.h"
  19. #endif
  20. #include <stdio.h>
GC_INNER signed_word GC_bytes_found = 0;
                        /* Number of bytes of memory reclaimed  */
                        /* minus the number of bytes originally */
                        /* on free lists which we had to drop.  */

#if defined(PARALLEL_MARK)
  GC_INNER signed_word GC_fl_builder_count = 0;
                /* Number of threads currently building free lists without */
                /* holding GC lock.  It is not safe to collect if this is  */
                /* nonzero.  Also, together with the mark lock, it is used as */
                /* a semaphore during marker threads startup.              */
#endif /* PARALLEL_MARK */

/* We defer printing of leaked objects until we're done with the GC    */
/* cycle, since the routine for printing objects needs to run outside  */
/* the collector, e.g. without the allocation lock.                    */
#ifndef MAX_LEAKED
# define MAX_LEAKED 40
#endif

/* Table of leaked objects found during the current cycle; drained and */
/* cleared by GC_print_all_errors.                                     */
STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
STATIC unsigned GC_n_leaked = 0;

/* Set whenever a leaked or smashed object is recorded.                */
GC_INNER GC_bool GC_have_errors = FALSE;

#if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
  /* Forward declaration; defined near the end of this file.           */
  STATIC void GC_reclaim_unconditionally_marked(void);
#endif
  44. GC_INLINE void GC_add_leaked(ptr_t leaked)
  45. {
  46. # ifndef SHORT_DBG_HDRS
  47. if (GC_findleak_delay_free && !GC_check_leaked(leaked))
  48. return;
  49. # endif
  50. GC_have_errors = TRUE;
  51. if (GC_n_leaked < MAX_LEAKED) {
  52. GC_leaked[GC_n_leaked++] = leaked;
  53. /* Make sure it's not reclaimed this cycle */
  54. GC_set_mark_bit(leaked);
  55. }
  56. }
/* Print all objects on the list after printing any smashed objects.  */
/* Clear both lists.  Called without the allocation lock held.        */
GC_INNER void GC_print_all_errors(void)
{
    static GC_bool printing_errors = FALSE; /* guards against reentry */
    GC_bool have_errors;
    unsigned i, n_leaked;
    ptr_t leaked[MAX_LEAKED];   /* local snapshot taken under the lock */
    DCL_LOCK_STATE;

    LOCK();
    if (printing_errors) {
        /* Another thread (or a recursive call) is already printing.  */
        UNLOCK();
        return;
    }
    have_errors = GC_have_errors;
    printing_errors = TRUE;
    /* Snapshot and reset the global leak table while holding the     */
    /* lock, so the printing and freeing below can run unlocked.      */
    n_leaked = GC_n_leaked;
    if (n_leaked > 0) {
      GC_ASSERT(n_leaked <= MAX_LEAKED);
      BCOPY(GC_leaked, leaked, n_leaked * sizeof(ptr_t));
      GC_n_leaked = 0;
      BZERO(GC_leaked, n_leaked * sizeof(ptr_t));
    }
    UNLOCK();

    if (GC_debugging_started) {
      GC_print_all_smashed();
    } else {
      /* Smashed objects are only tracked in debugging mode, so any   */
      /* recorded errors can only come from the (empty) leak list.    */
      have_errors = FALSE;
    }

    if (n_leaked > 0) {
        GC_err_printf("Found %u leaked objects:\n", n_leaked);
        have_errors = TRUE;
    }
    for (i = 0; i < n_leaked; i++) {
        ptr_t p = leaked[i];

        GC_print_heap_obj(p);
        GC_free(p);     /* release the object we pinned in GC_add_leaked */
    }

    if (have_errors
#       ifndef GC_ABORT_ON_LEAK
          /* Abort only if explicitly requested via the environment.  */
          && GETENV("GC_ABORT_ON_LEAK") != NULL
#       endif
        ) {
      ABORT("Leaked or smashed objects encountered");
    }

    LOCK();
    printing_errors = FALSE;
    UNLOCK();
}
  106. /*
  107. * reclaim phase
  108. *
  109. */
  110. /* Test whether a block is completely empty, i.e. contains no marked */
  111. /* objects. This does not require the block to be in physical memory. */
  112. GC_INNER GC_bool GC_block_empty(hdr *hhdr)
  113. {
  114. return (hhdr -> hb_n_marks == 0);
  115. }
  116. STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
  117. {
  118. return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
  119. }
/* FIXME: This should perhaps again be specialized for USE_MARK_BYTES  */
/* and USE_MARK_BITS cases.                                            */

/*
 * Restore unmarked small objects in h of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  Sz is in bytes.
 */
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, word sz,
                              ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);

    /* go through all words in block */
    while ((word)p <= (word)plim) {
        if (mark_bit_from_hdr(hhdr, bit_no)) {
            /* Object is live: just step over it.                     */
            p = (word *)((ptr_t)p + sz);
        } else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              /* sz is even and p is 2-word aligned here (asserted),  */
              /* so the object may be cleared two words at a time.    */
              GC_ASSERT(!(sz & 1)
                        && !((word)p & (2 * sizeof(word) - 1)));
              p[1] = 0;         /* clear the word after the link field */
              p += 2;
              while ((word)p < (word)q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while ((word)p < (word)q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return(list);
}
  170. /* The same thing, but don't clear objects: */
  171. STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, word sz,
  172. ptr_t list, signed_word *count)
  173. {
  174. word bit_no = 0;
  175. word *p, *plim;
  176. signed_word n_bytes_found = 0;
  177. GC_ASSERT(sz == hhdr -> hb_sz);
  178. p = (word *)(hbp->hb_body);
  179. plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
  180. /* go through all words in block */
  181. while ((word)p <= (word)plim) {
  182. if (!mark_bit_from_hdr(hhdr, bit_no)) {
  183. n_bytes_found += sz;
  184. /* object is available - put on list */
  185. obj_link(p) = list;
  186. list = ((ptr_t)p);
  187. }
  188. p = (word *)((ptr_t)p + sz);
  189. bit_no += MARK_BIT_OFFSET(sz);
  190. }
  191. *count += n_bytes_found;
  192. return(list);
  193. }
#ifdef ENABLE_DISCLAIM
  /* Call reclaim notifier for block's kind on each unmarked object in */
  /* block, all within a pair of corresponding enter/leave callbacks.  */
  STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, word sz,
                                       ptr_t list, signed_word *count)
  {
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;
    struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
    int (GC_CALLBACK *disclaim)(void *) = ok->ok_disclaim_proc;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp -> hb_body);
    plim = (word *)((ptr_t)p + HBLKSIZE - sz);

    while ((word)p <= (word)plim) {
        int marked = mark_bit_from_hdr(hhdr, bit_no);

        if (!marked && (*disclaim)(p)) {
            /* The notifier refused reclamation: resurrect the object */
            /* and account for it in the block's mark count.          */
            hhdr -> hb_n_marks++;
            marked = 1;
        }
        if (marked)
            p = (word *)((ptr_t)p + sz);
        else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              /* Clearing proceeds two words at a time; requires the  */
              /* size and alignment asserted below.                   */
              GC_ASSERT((sz & 1) == 0);
              GC_ASSERT(((word)p & (2 * sizeof(word) - 1)) == 0);
              p[1] = 0;
              p += 2;
              while ((word)p < (word)q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while ((word)p < (word)q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return list;
  }
#endif /* ENABLE_DISCLAIM */
  245. /* Don't really reclaim objects, just check for unmarked ones: */
  246. STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
  247. {
  248. word bit_no;
  249. ptr_t p, plim;
  250. GC_ASSERT(sz == hhdr -> hb_sz);
  251. /* go through all words in block */
  252. p = hbp->hb_body;
  253. plim = p + HBLKSIZE - sz;
  254. for (bit_no = 0; (word)p <= (word)plim;
  255. p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
  256. if (!mark_bit_from_hdr(hhdr, bit_no)) {
  257. GC_add_leaked(p);
  258. }
  259. }
  260. }
/* Is a pointer-free block?  Same as IS_PTRFREE macro (in os_dep.c) but */
/* uses unordered atomic access to avoid racing with GC_realloc.        */
#ifdef AO_HAVE_load
# define IS_PTRFREE_SAFE(hhdr) \
                (AO_load((volatile AO_t *)&(hhdr)->hb_descr) == 0)
#else
  /* No race as GC_realloc holds the lock while updating hb_descr.      */
# define IS_PTRFREE_SAFE(hhdr) ((hhdr)->hb_descr == 0)
#endif
/*
 * Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 * Sz is now in bytes.
 */
GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                                  GC_bool init, ptr_t list,
                                  signed_word *count)
{
    ptr_t result;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
#   ifndef GC_DISABLE_INCREMENTAL
      /* Unprotect the block so the sweep may write to it.             */
      GC_remove_protection(hbp, 1, IS_PTRFREE_SAFE(hhdr));
#   endif
#   ifdef ENABLE_DISCLAIM
      if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
        /* Run the kind's disclaim callback on each unmarked object.   */
        result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, count);
      } else
#   endif
    /* else */ if (init || GC_debugging_started) {
      /* Reclaimed objects must be zeroed before reuse.                */
      result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    } else {
      /* Skipping initialization is only safe for pointer-free blocks. */
      GC_ASSERT(IS_PTRFREE_SAFE(hhdr));
      result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    }
    /* Uncollectible objects must always appear marked so they are     */
    /* never swept.                                                    */
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
    return result;
}
  298. /*
  299. * Restore unmarked small objects in the block pointed to by hbp
  300. * to the appropriate object free list.
  301. * If entirely empty blocks are to be completely deallocated, then
  302. * caller should perform that check.
  303. */
  304. STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
  305. GC_bool report_if_found)
  306. {
  307. hdr *hhdr = HDR(hbp);
  308. word sz = hhdr -> hb_sz;
  309. struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
  310. void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
  311. hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
  312. if (report_if_found) {
  313. GC_reclaim_check(hbp, hhdr, sz);
  314. } else {
  315. *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
  316. (ptr_t)(*flh), &GC_bytes_found);
  317. }
  318. }
  319. #ifdef ENABLE_DISCLAIM
  320. STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
  321. {
  322. hdr *hhdr = HDR(hbp);
  323. word sz = hhdr -> hb_sz;
  324. struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
  325. void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
  326. void *flh_next;
  327. hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
  328. flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
  329. (ptr_t)(*flh), &GC_bytes_found);
  330. if (hhdr -> hb_n_marks)
  331. *flh = flh_next;
  332. else {
  333. GC_bytes_found += HBLKSIZE;
  334. GC_freehblk(hbp);
  335. }
  336. }
  337. #endif /* ENABLE_DISCLAIM */
  338. /*
  339. * Restore an unmarked large object or an entirely empty blocks of small objects
  340. * to the heap block free list.
  341. * Otherwise enqueue the block for later processing
  342. * by GC_reclaim_small_nonempty_block.
  343. * If report_if_found is TRUE, then process any block immediately, and
  344. * simply report free objects; do not actually reclaim them.
  345. */
  346. STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
  347. {
  348. hdr * hhdr = HDR(hbp);
  349. word sz = hhdr -> hb_sz; /* size of objects in current block */
  350. struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
  351. if( sz > MAXOBJBYTES ) { /* 1 big object */
  352. if( !mark_bit_from_hdr(hhdr, 0) ) {
  353. if (report_if_found) {
  354. GC_add_leaked((ptr_t)hbp);
  355. } else {
  356. word blocks;
  357. # ifdef ENABLE_DISCLAIM
  358. if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
  359. struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
  360. if ((*ok->ok_disclaim_proc)(hbp)) {
  361. /* Not disclaimed => resurrect the object. */
  362. set_mark_bit_from_hdr(hhdr, 0);
  363. goto in_use;
  364. }
  365. }
  366. # endif
  367. blocks = OBJ_SZ_TO_BLOCKS(sz);
  368. if (blocks > 1) {
  369. GC_large_allocd_bytes -= blocks * HBLKSIZE;
  370. }
  371. GC_bytes_found += sz;
  372. GC_freehblk(hbp);
  373. }
  374. } else {
  375. # ifdef ENABLE_DISCLAIM
  376. in_use:
  377. # endif
  378. if (IS_PTRFREE_SAFE(hhdr)) {
  379. GC_atomic_in_use += sz;
  380. } else {
  381. GC_composite_in_use += sz;
  382. }
  383. }
  384. } else {
  385. GC_bool empty = GC_block_empty(hhdr);
  386. # ifdef PARALLEL_MARK
  387. /* Count can be low or one too high because we sometimes */
  388. /* have to ignore decrements. Objects can also potentially */
  389. /* be repeatedly marked by each marker. */
  390. /* Here we assume two markers, but this is extremely */
  391. /* unlikely to fail spuriously with more. And if it does, it */
  392. /* should be looked at. */
  393. GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
  394. # else
  395. GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
  396. # endif
  397. if (report_if_found) {
  398. GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
  399. } else if (empty) {
  400. # ifdef ENABLE_DISCLAIM
  401. if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
  402. GC_disclaim_and_reclaim_or_free_small_block(hbp);
  403. } else
  404. # endif
  405. /* else */ {
  406. GC_bytes_found += HBLKSIZE;
  407. GC_freehblk(hbp);
  408. }
  409. } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
  410. /* group of smaller objects, enqueue the real work */
  411. struct hblk **rlh = ok -> ok_reclaim_list;
  412. if (rlh != NULL) {
  413. rlh += BYTES_TO_GRANULES(sz);
  414. hhdr -> hb_next = *rlh;
  415. *rlh = hbp;
  416. }
  417. } /* else not worth salvaging. */
  418. /* We used to do the nearly_full check later, but we */
  419. /* already have the right cache context here. Also */
  420. /* doing it here avoids some silly lock contention in */
  421. /* GC_malloc_many. */
  422. if (IS_PTRFREE_SAFE(hhdr)) {
  423. GC_atomic_in_use += sz * hhdr -> hb_n_marks;
  424. } else {
  425. GC_composite_in_use += sz * hhdr -> hb_n_marks;
  426. }
  427. }
  428. }
#if !defined(NO_DEBUGGING)
/* Routines to gather and print heap block info        */
/* intended for debugging.  Otherwise should be called */
/* with lock.                                          */

/* Accumulator threaded (cast to word) through GC_apply_to_all_blocks  */
/* by GC_print_block_list below.                                       */
struct Print_stats
{
        size_t number_of_blocks;  /* heap blocks visited so far        */
        size_t total_bytes;       /* their sizes, each rounded up to a */
                                  /* multiple of HBLKSIZE              */
};
#ifdef USE_MARK_BYTES

/* Return the number of set mark bits in the given header.  */
/* Remains externally visible as used by GNU GCJ currently. */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    word sz = hhdr -> hb_sz;
    int offset = (int)MARK_BIT_OFFSET(sz);  /* stride between objects  */
    int limit = (int)FINAL_MARK_BIT(sz);    /* index of the final bit  */

    /* One mark byte per object; step by the per-object offset.        */
    for (i = 0; i < limit; i += offset) {
        result += hhdr -> hb_marks[i];
    }
    /* The final mark bit (just past the last object) is always set.   */
    GC_ASSERT(hhdr -> hb_marks[limit]);
    return(result);
}
  454. #else
  455. /* Number of set bits in a word. Not performance critical. */
  456. static int set_bits(word n)
  457. {
  458. word m = n;
  459. int result = 0;
  460. while (m > 0) {
  461. if (m & 1) result++;
  462. m >>= 1;
  463. }
  464. return(result);
  465. }
/* Return the number of set mark bits in the given header (minus the   */
/* always-set final mark bit).                                         */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    int n_mark_words;
#   ifdef MARK_BIT_PER_OBJ
      int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);

      /* A "big" block holds a single object.                          */
      if (0 == n_objs) n_objs = 1;
      /* Number of mark words in use, rounded up.                      */
      n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
#   else /* MARK_BIT_PER_GRANULE */
      n_mark_words = MARK_BITS_SZ;
#   endif
    for (i = 0; i < n_mark_words - 1; i++) {
        result += set_bits(hhdr -> hb_marks[i]);
    }
#   ifdef MARK_BIT_PER_OBJ
      /* Shift out the high bits of the last mark word that do not     */
      /* correspond to real objects before counting.                   */
      result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
                         << (n_mark_words * WORDSZ - n_objs));
#   else
      result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
#   endif
    /* Subtract one for the final mark bit, which is always set.       */
    return(result - 1);
}

#endif /* !USE_MARK_BYTES */
/* Callback for GC_apply_to_all_blocks: print one CSV-style line per   */
/* block and accumulate totals into the Print_stats passed (cast to    */
/* word) as raw_ps.                                                    */
STATIC void GC_print_block_descr(struct hblk *h,
                                 word /* struct PrintStats */ raw_ps)
{
    hdr * hhdr = HDR(h);
    size_t bytes = hhdr -> hb_sz;
    struct Print_stats *ps;
    unsigned n_marks = GC_n_set_marks(hhdr);
    unsigned n_objs = (unsigned)HBLK_OBJS(bytes);

    if (0 == n_objs) n_objs = 1;    /* a "big" block holds one object */
    if (hhdr -> hb_n_marks != n_marks) {
      /* Cached mark count disagrees with the recount; flag via "!=".  */
      GC_printf("%u,%u,%u!=%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
                (unsigned)hhdr->hb_n_marks, n_marks, n_objs);
    } else {
      GC_printf("%u,%u,%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
                n_marks, n_objs);
    }

    ps = (struct Print_stats *)raw_ps;
    ps->total_bytes += (bytes + (HBLKSIZE-1)) & ~(HBLKSIZE-1); /* round up */
    ps->number_of_blocks++;
}
  510. void GC_print_block_list(void)
  511. {
  512. struct Print_stats pstats;
  513. GC_printf("kind(0=ptrfree,1=normal,2=unc.),"
  514. "size_in_bytes,#_marks_set,#objs\n");
  515. pstats.number_of_blocks = 0;
  516. pstats.total_bytes = 0;
  517. GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
  518. GC_printf("blocks= %lu, bytes= %lu\n",
  519. (unsigned long)pstats.number_of_blocks,
  520. (unsigned long)pstats.total_bytes);
  521. }
  522. #include "gc_inline.h" /* for GC_print_free_list prototype */
  523. /* Currently for debugger use only: */
  524. GC_API void GC_CALL GC_print_free_list(int kind, size_t sz_in_granules)
  525. {
  526. void *flh_next;
  527. int n;
  528. GC_ASSERT(kind < MAXOBJKINDS);
  529. GC_ASSERT(sz_in_granules <= MAXOBJGRANULES);
  530. flh_next = GC_obj_kinds[kind].ok_freelist[sz_in_granules];
  531. for (n = 0; flh_next; n++) {
  532. GC_printf("Free object in heap block %p [%d]: %p\n",
  533. (void *)HBLKPTR(flh_next), n, flh_next);
  534. flh_next = obj_link(flh_next);
  535. }
  536. }
  537. #endif /* !NO_DEBUGGING */
  538. /*
  539. * Clear all obj_link pointers in the list of free objects *flp.
  540. * Clear *flp.
  541. * This must be done before dropping a list of free gcj-style objects,
  542. * since may otherwise end up with dangling "descriptor" pointers.
  543. * It may help for other pointer-containing objects.
  544. */
  545. STATIC void GC_clear_fl_links(void **flp)
  546. {
  547. void *next = *flp;
  548. while (0 != next) {
  549. *flp = 0;
  550. flp = &(obj_link(next));
  551. next = *flp;
  552. }
  553. }
/*
 * Perform GC_reclaim_block on the entire heap, after first clearing
 * small object free lists (if we are not just looking for leaks).
 */
GC_INNER void GC_start_reclaim(GC_bool report_if_found)
{
    unsigned kind;

#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
    /* Reset in use counters.  GC_reclaim_block recomputes them. */
    GC_composite_in_use = 0;
    GC_atomic_in_use = 0;
    /* Clear reclaim- and free-lists */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
        GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
            /* For kinds with a nonzero descriptor (e.g. gcj objects), */
            /* the free-list links must be cleared, not just dropped,  */
            /* to avoid dangling "descriptor" pointers.                */

        if (rlist == 0) continue;       /* This kind not used. */
        if (!report_if_found) {
            void **fop;
            void **lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);

            /* Drop every per-granule free list of this kind.  (The    */
            /* odd cast is the file's way of incrementing fop.)        */
            for (fop = GC_obj_kinds[kind].ok_freelist;
                 (word)fop < (word)lim; (*(word **)&fop)++) {
              if (*fop != 0) {
                if (should_clobber) {
                  GC_clear_fl_links(fop);
                } else {
                  *fop = 0;
                }
              }
            }
        } /* otherwise free list objects are marked, */
          /* and its safe to leave them              */
        BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
    }

    /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
    /* or enqueue the block for later processing.                            */
    GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);

#   ifdef EAGER_SWEEP
      /* This is a very stupid thing to do.  We make it possible anyway, */
      /* so that you can convince yourself that it really is very stupid. */
      GC_reclaim_all((GC_stop_func)0, FALSE);
#   elif defined(ENABLE_DISCLAIM)
      /* However, make sure to clear reclaimable objects of kinds with */
      /* unconditional marking enabled before we do any significant    */
      /* marking work.                                                 */
      GC_reclaim_unconditionally_marked();
#   endif
#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
}
  606. /*
  607. * Sweep blocks of the indicated object size and kind until either the
  608. * appropriate free list is nonempty, or there are no more blocks to
  609. * sweep.
  610. */
  611. GC_INNER void GC_continue_reclaim(word sz /* granules */, int kind)
  612. {
  613. hdr * hhdr;
  614. struct hblk * hbp;
  615. struct obj_kind * ok = &(GC_obj_kinds[kind]);
  616. struct hblk ** rlh = ok -> ok_reclaim_list;
  617. void **flh = &(ok -> ok_freelist[sz]);
  618. if (rlh == 0) return; /* No blocks of this kind. */
  619. rlh += sz;
  620. while ((hbp = *rlh) != 0) {
  621. hhdr = HDR(hbp);
  622. *rlh = hhdr -> hb_next;
  623. GC_reclaim_small_nonempty_block(hbp, FALSE);
  624. if (*flh != 0) break;
  625. }
  626. }
/*
 * Reclaim all small blocks waiting to be reclaimed.
 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
 * If this returns TRUE, then it's safe to restart the world
 * with incorrectly cleared mark bits.
 * If ignore_old is TRUE, then reclaim only blocks that have been
 * recently reclaimed, and discard the rest.
 * Stop_func may be 0.
 */
GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
#   ifndef NO_CLOCK
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */

      if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);
#   endif

    /* Drain every (kind, granule-size) reclaim list.                  */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                /* Allow the caller to abandon the sweep mid-way.      */
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return(FALSE);
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
                    /* It's likely we'll need it this time, too */
                    /* It's been touched recently, so this      */
                    /* shouldn't trigger paging.                */
                    GC_reclaim_small_nonempty_block(hbp, FALSE);
                }
            }
        }
    }
#   ifndef NO_CLOCK
      if (GC_print_stats == VERBOSE) {
        CLOCK_TYPE done_time;

        GET_TIME(done_time);
        GC_verbose_log_printf("Disposing of reclaim lists took %lu msecs\n",
                              MS_TIME_DIFF(done_time,start_time));
      }
#   endif
    return(TRUE);
}
  681. #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
  682. /* We do an eager sweep on heap blocks where unconditional marking has */
  683. /* been enabled, so that any reclaimable objects have been reclaimed */
  684. /* before we start marking. This is a simplified GC_reclaim_all */
  685. /* restricted to kinds where ok_mark_unconditionally is true. */
  686. STATIC void GC_reclaim_unconditionally_marked(void)
  687. {
  688. word sz;
  689. unsigned kind;
  690. hdr * hhdr;
  691. struct hblk * hbp;
  692. struct obj_kind * ok;
  693. struct hblk ** rlp;
  694. struct hblk ** rlh;
  695. for (kind = 0; kind < GC_n_kinds; kind++) {
  696. ok = &(GC_obj_kinds[kind]);
  697. if (!ok->ok_mark_unconditionally)
  698. continue;
  699. rlp = ok->ok_reclaim_list;
  700. if (rlp == 0)
  701. continue;
  702. for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
  703. rlh = rlp + sz;
  704. while ((hbp = *rlh) != 0) {
  705. hhdr = HDR(hbp);
  706. *rlh = hhdr->hb_next;
  707. GC_reclaim_small_nonempty_block(hbp, FALSE);
  708. }
  709. }
  710. }
  711. }
  712. #endif /* !EAGER_SWEEP && ENABLE_DISCLAIM */
/* Bundle passed (cast to word) through GC_apply_to_all_blocks to the  */
/* per-block enumeration callback below.                               */
struct enumerate_reachable_s {
  GC_reachable_object_proc proc;
  void *client_data;
};

/* Callback: invoke ped->proc on every marked object in block hbp.     */
STATIC void GC_do_enumerate_reachable_objects(struct hblk *hbp, word ped)
{
  struct hblkhdr *hhdr = HDR(hbp);
  size_t sz = (size_t)hhdr->hb_sz;
  size_t bit_no;
  char *p, *plim;

  if (GC_block_empty(hhdr)) {
    return;     /* no marked objects in this block */
  }

  p = hbp->hb_body;
  if (sz > MAXOBJBYTES) { /* one big object */
    plim = p;   /* a single iteration of the loop below */
  } else {
    plim = hbp->hb_body + HBLKSIZE - sz;
  }
  /* Go through all words in block. */
  for (bit_no = 0; p <= plim; bit_no += MARK_BIT_OFFSET(sz), p += sz) {
    if (mark_bit_from_hdr(hhdr, bit_no)) {
      ((struct enumerate_reachable_s *)ped)->proc(p, sz,
                        ((struct enumerate_reachable_s *)ped)->client_data);
    }
  }
}
  740. GC_API void GC_CALL GC_enumerate_reachable_objects_inner(
  741. GC_reachable_object_proc proc,
  742. void *client_data)
  743. {
  744. struct enumerate_reachable_s ed;
  745. GC_ASSERT(I_HOLD_LOCK());
  746. ed.proc = proc;
  747. ed.client_data = client_data;
  748. GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects, (word)&ed);
  749. }