/* blacklst.c */
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
  4. *
  5. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  6. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  7. *
  8. * Permission is hereby granted to use or copy this program
  9. * for any purpose, provided the above notices are retained on all copies.
  10. * Permission to modify the code and to distribute modified code is granted,
  11. * provided the above notices are retained, and a notice that the code was
  12. * modified is included with the above copyright notice.
  13. */
  14. #include "private/gc_priv.h"
  15. /*
  16. * We maintain several hash tables of hblks that have had false hits.
  17. * Each contains one bit per hash bucket; If any page in the bucket
  18. * has had a false hit, we assume that all of them have.
  19. * See the definition of page_hash_table in gc_private.h.
  20. * False hits from the stack(s) are much more dangerous than false hits
  21. * from elsewhere, since the former can pin a large object that spans the
  22. * block, even though it does not start on the dangerous block.
  23. */
  24. /*
  25. * Externally callable routines are:
  26. * GC_add_to_black_list_normal
  27. * GC_add_to_black_list_stack
  28. * GC_promote_black_lists
  29. * GC_is_black_listed
  30. *
  31. * All require that the allocator lock is held.
  32. */
/* Pointers to individual tables.  We replace one table by another by   */
/* switching these pointers.                                            */
STATIC word * GC_old_normal_bl = NULL;
                        /* Nonstack false references seen at last full  */
                        /* collection.                                  */
STATIC word * GC_incomplete_normal_bl = NULL;
                        /* Nonstack false references seen since last    */
                        /* full collection.                             */
STATIC word * GC_old_stack_bl = NULL;
                        /* Stack-sourced false references seen at last  */
                        /* full collection.                             */
STATIC word * GC_incomplete_stack_bl = NULL;
                        /* Stack-sourced false references seen since    */
                        /* last full collection.                        */
STATIC word GC_total_stack_black_listed = 0;
                        /* Number of bytes on stack blacklist.          */
GC_INNER word GC_black_list_spacing = MINHINCR * HBLKSIZE;
                        /* Initial rough guess; recomputed in           */
                        /* GC_promote_black_lists.                      */

STATIC void GC_clear_bl(word *);
  48. GC_INNER void GC_default_print_heap_obj_proc(ptr_t p)
  49. {
  50. ptr_t base = (ptr_t)GC_base(p);
  51. int kind = HDR(base)->hb_obj_kind;
  52. GC_err_printf("object at %p of appr. %lu bytes (%s)\n",
  53. (void *)base, (unsigned long)GC_size(base),
  54. kind == PTRFREE ? "atomic" :
  55. IS_UNCOLLECTABLE(kind) ? "uncollectable" : "composite");
  56. }
/* Function pointer used to print a description of a heap object;      */
/* initialized to the default printer defined above.                   */
GC_INNER void (*GC_print_heap_obj)(ptr_t p) = GC_default_print_heap_obj_proc;
  58. #ifdef PRINT_BLACK_LIST
  59. STATIC void GC_print_blacklisted_ptr(word p, ptr_t source,
  60. const char *kind_str)
  61. {
  62. ptr_t base = (ptr_t)GC_base(source);
  63. if (0 == base) {
  64. GC_err_printf("Black listing (%s) %p referenced from %p in %s\n",
  65. kind_str, (void *)p, (void *)source,
  66. NULL != source ? "root set" : "register");
  67. } else {
  68. /* FIXME: We can't call the debug version of GC_print_heap_obj */
  69. /* (with PRINT_CALL_CHAIN) here because the lock is held and */
  70. /* the world is stopped. */
  71. GC_err_printf("Black listing (%s) %p referenced from %p in"
  72. " object at %p of appr. %lu bytes\n",
  73. kind_str, (void *)p, (void *)source,
  74. (void *)base, (unsigned long)GC_size(base));
  75. }
  76. }
  77. #endif /* PRINT_BLACK_LIST */
  78. GC_INNER void GC_bl_init_no_interiors(void)
  79. {
  80. if (GC_incomplete_normal_bl == 0) {
  81. GC_old_normal_bl = (word *)GC_scratch_alloc(sizeof(page_hash_table));
  82. GC_incomplete_normal_bl = (word *)GC_scratch_alloc(
  83. sizeof(page_hash_table));
  84. if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
  85. GC_err_printf("Insufficient memory for black list\n");
  86. EXIT();
  87. }
  88. GC_clear_bl(GC_old_normal_bl);
  89. GC_clear_bl(GC_incomplete_normal_bl);
  90. }
  91. }
  92. GC_INNER void GC_bl_init(void)
  93. {
  94. if (!GC_all_interior_pointers) {
  95. GC_bl_init_no_interiors();
  96. }
  97. GC_ASSERT(NULL == GC_old_stack_bl && NULL == GC_incomplete_stack_bl);
  98. GC_old_stack_bl = (word *)GC_scratch_alloc(sizeof(page_hash_table));
  99. GC_incomplete_stack_bl = (word *)GC_scratch_alloc(sizeof(page_hash_table));
  100. if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
  101. GC_err_printf("Insufficient memory for black list\n");
  102. EXIT();
  103. }
  104. GC_clear_bl(GC_old_stack_bl);
  105. GC_clear_bl(GC_incomplete_stack_bl);
  106. }
  107. STATIC void GC_clear_bl(word *doomed)
  108. {
  109. BZERO(doomed, sizeof(page_hash_table));
  110. }
  111. STATIC void GC_copy_bl(word *old, word *dest)
  112. {
  113. BCOPY(old, dest, sizeof(page_hash_table));
  114. }
/* Forward declaration; defined near the end of this file. */
static word total_stack_black_listed(void);
  116. /* Signal the completion of a collection. Turn the incomplete black */
  117. /* lists into new black lists, etc. */
  118. GC_INNER void GC_promote_black_lists(void)
  119. {
  120. word * very_old_normal_bl = GC_old_normal_bl;
  121. word * very_old_stack_bl = GC_old_stack_bl;
  122. GC_old_normal_bl = GC_incomplete_normal_bl;
  123. GC_old_stack_bl = GC_incomplete_stack_bl;
  124. if (!GC_all_interior_pointers) {
  125. GC_clear_bl(very_old_normal_bl);
  126. }
  127. GC_clear_bl(very_old_stack_bl);
  128. GC_incomplete_normal_bl = very_old_normal_bl;
  129. GC_incomplete_stack_bl = very_old_stack_bl;
  130. GC_total_stack_black_listed = total_stack_black_listed();
  131. GC_VERBOSE_LOG_PRINTF(
  132. "%lu bytes in heap blacklisted for interior pointers\n",
  133. (unsigned long)GC_total_stack_black_listed);
  134. if (GC_total_stack_black_listed != 0) {
  135. GC_black_list_spacing =
  136. HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed);
  137. }
  138. if (GC_black_list_spacing < 3 * HBLKSIZE) {
  139. GC_black_list_spacing = 3 * HBLKSIZE;
  140. }
  141. if (GC_black_list_spacing > MAXHINCR * HBLKSIZE) {
  142. GC_black_list_spacing = MAXHINCR * HBLKSIZE;
  143. /* Makes it easier to allocate really huge blocks, which otherwise */
  144. /* may have problems with nonuniform blacklist distributions. */
  145. /* This way we should always succeed immediately after growing the */
  146. /* heap. */
  147. }
  148. }
  149. GC_INNER void GC_unpromote_black_lists(void)
  150. {
  151. if (!GC_all_interior_pointers) {
  152. GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
  153. }
  154. GC_copy_bl(GC_old_stack_bl, GC_incomplete_stack_bl);
  155. }
#if defined(set_pht_entry_from_index_concurrent) && defined(PARALLEL_MARK) \
    && defined(THREAD_SANITIZER)
  /* With parallel marking under thread sanitizer, use the concurrent  */
  /* (race-free) variant of the bit-setting primitive.                 */
# define backlist_set_pht_entry_from_index(db, index) \
                        set_pht_entry_from_index_concurrent(db, index)
#else
  /* It is safe to set a bit in a blacklist even without               */
  /* synchronization, the only drawback is that we might have          */
  /* to redo blacklisting sometimes.                                   */
  /* Note: "backlist" (sic) is this macro's historical spelling; it is */
  /* used by the GC_add_to_black_list_* functions below.               */
# define backlist_set_pht_entry_from_index(bl, index) \
                        set_pht_entry_from_index(bl, index)
#endif
  167. /* P is not a valid pointer reference, but it falls inside */
  168. /* the plausible heap bounds. */
  169. /* Add it to the normal incomplete black list if appropriate. */
  170. #ifdef PRINT_BLACK_LIST
  171. GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source)
  172. #else
  173. GC_INNER void GC_add_to_black_list_normal(word p)
  174. #endif
  175. {
  176. if (GC_modws_valid_offsets[p & (sizeof(word)-1)]) {
  177. word index = PHT_HASH((word)p);
  178. if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
  179. # ifdef PRINT_BLACK_LIST
  180. if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
  181. GC_print_blacklisted_ptr(p, source, "normal");
  182. }
  183. # endif
  184. backlist_set_pht_entry_from_index(GC_incomplete_normal_bl, index);
  185. } /* else this is probably just an interior pointer to an allocated */
  186. /* object, and isn't worth black listing. */
  187. }
  188. }
  189. /* And the same for false pointers from the stack. */
  190. #ifdef PRINT_BLACK_LIST
  191. GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source)
  192. #else
  193. GC_INNER void GC_add_to_black_list_stack(word p)
  194. #endif
  195. {
  196. word index = PHT_HASH((word)p);
  197. if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
  198. # ifdef PRINT_BLACK_LIST
  199. if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
  200. GC_print_blacklisted_ptr(p, source, "stack");
  201. }
  202. # endif
  203. backlist_set_pht_entry_from_index(GC_incomplete_stack_bl, index);
  204. }
  205. }
/*
 * Is the block starting at h of size len bytes black listed?  If so,
 * return the address of the next plausible r such that (r, len) might not
 * be black listed.  (R may not actually be in the heap.  We guarantee only
 * that every smaller value of r after h is also black listed.)
 * If (h,len) is not black listed, return 0.
 * Knows about the structure of the black list hash tables.
 */
struct hblk * GC_is_black_listed(struct hblk *h, word len)
{
    word index = PHT_HASH((word)h);
    word i;
    word nblocks;

    /* When interior pointers are not recognized globally, a normal-   */
    /* list hit on the first block suffices to reject h.               */
    if (!GC_all_interior_pointers
        && (get_pht_entry_from_index(GC_old_normal_bl, index)
            || get_pht_entry_from_index(GC_incomplete_normal_bl, index))) {
        return (h+1);
    }

    nblocks = divHBLKSZ(len);
    /* Scan the stack blacklists for every block of the candidate      */
    /* range.                                                          */
    for (i = 0;;) {
        if (GC_old_stack_bl[divWORDSZ(index)] == 0
            && GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
            /* An easy case: the entire word of bucket bits is clear   */
            /* in both tables, so skip ahead to the block whose bit    */
            /* falls in the next word.  NOTE(review): this relies on   */
            /* consecutive blocks hashing to consecutive bit indices   */
            /* (see PHT_HASH) -- confirm against gc_priv.h.            */
            i += WORDSZ - modWORDSZ(index);
        } else {
            if (get_pht_entry_from_index(GC_old_stack_bl, index)
                || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
                /* Block h+i is blacklisted; the caller should retry   */
                /* starting just past it.                              */
                return(h+i+1);
            }
            i++;
        }
        if (i >= nblocks) break;
        index = PHT_HASH((word)(h+i));
    }
    return(0);
}
/* Return the number of blacklisted blocks in a given range.           */
/* Used only for statistical purposes.                                 */
/* Looks only at GC_old_stack_bl.  NOTE(review): the previous comment  */
/* said GC_incomplete_stack_bl, which does not match the code; at the  */
/* only call path (total_stack_black_listed, invoked from              */
/* GC_promote_black_lists) GC_old_stack_bl holds the just-promoted     */
/* stack blacklist.                                                    */
STATIC word GC_number_stack_black_listed(struct hblk *start,
                                         struct hblk *endp1)
{
    struct hblk * h;
    word result = 0;

    for (h = start; (word)h < (word)endp1; h++) {
        word index = PHT_HASH((word)h);

        /* Count the block if its hash bucket's bit is set.            */
        if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
    }
    return(result);
}
  256. /* Return the total number of (stack) black-listed bytes. */
  257. static word total_stack_black_listed(void)
  258. {
  259. unsigned i;
  260. word total = 0;
  261. for (i = 0; i < GC_n_heap_sects; i++) {
  262. struct hblk * start = (struct hblk *) GC_heap_sects[i].hs_start;
  263. struct hblk * endp1 = start + divHBLKSZ(GC_heap_sects[i].hs_bytes);
  264. total += GC_number_stack_black_listed(start, endp1);
  265. }
  266. return(total * HBLKSIZE);
  267. }