/* headers.c */
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
  5. *
  6. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  7. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  8. *
  9. * Permission is hereby granted to use or copy this program
  10. * for any purpose, provided the above notices are retained on all copies.
  11. * Permission to modify the code and to distribute modified code is granted,
  12. * provided the above notices are retained, and a notice that the code was
  13. * modified is included with the above copyright notice.
  14. */
  15. #include "private/gc_priv.h"
  16. /*
  17. * This implements:
  18. * 1. allocation of heap block headers
  19. * 2. A map from addresses to heap block addresses to heap block headers
  20. *
  21. * Access speed is crucial. We implement an index structure based on a 2
  22. * level tree.
  23. */
  24. STATIC bottom_index * GC_all_bottom_indices = 0;
  25. /* Pointer to the first (lowest address) */
  26. /* bottom_index. Assumes the lock is held. */
  27. STATIC bottom_index * GC_all_bottom_indices_end = 0;
  28. /* Pointer to the last (highest address) */
  29. /* bottom_index. Assumes the lock is held. */
  30. void GC_clear_bottom_indices()
  31. {
  32. GC_all_bottom_indices = 0;
  33. GC_all_bottom_indices_end = 0;
  34. }
  35. /* Non-macro version of header location routine */
  36. GC_INNER hdr * GC_find_header(ptr_t h)
  37. {
  38. # ifdef HASH_TL
  39. hdr * result;
  40. GET_HDR(h, result);
  41. return(result);
  42. # else
  43. return(HDR_INNER(h));
  44. # endif
  45. }
/* Handle a header cache miss.  Returns a pointer to the        */
/* header corresponding to p, if p can possibly be a valid      */
/* object pointer, and 0 otherwise.                             */
/* GUARANTEED to return 0 for a pointer past the first page     */
/* of an object unless both GC_all_interior_pointers is set     */
/* and p is in fact a valid object pointer.                     */
/* Never returns a pointer to a free hblk.                      */
GC_INNER hdr *
#ifdef PRINT_BLACK_LIST
  GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
#else
  GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
#endif
{
  hdr *hhdr;

  HC_MISS();            /* bump the miss counter (profiling builds) */
  GET_HDR(p, hhdr);
  if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
    /* The slot holds either nil (not a heap address) or a small    */
    /* forwarding count: p points past the first block of a large   */
    /* object.                                                      */
    if (GC_all_interior_pointers) {
      if (hhdr != 0) {
        ptr_t current = p;

        current = (ptr_t)HBLKPTR(current);
        /* Follow the forwarding counts backwards until we reach    */
        /* a real header (or nil).                                  */
        do {
            current = current - HBLKSIZE*(word)hhdr;
            hhdr = HDR(current);
        } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
        /* current points to near the start of the large object */
        if (hhdr -> hb_flags & IGNORE_OFF_PAGE)
            return 0;
        if (HBLK_IS_FREE(hhdr)
            || p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
            GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
            /* Pointer past the end of the block */
            return 0;
        }
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
        /* And return zero: */
      }
      GC_ASSERT(hhdr == 0 || !HBLK_IS_FREE(hhdr));
      return hhdr;
      /* Pointers past the first page are probably too rare     */
      /* to add them to the cache.  We don't.                   */
      /* And correctness relies on the fact that we don't.      */
    } else {
      if (hhdr == 0) {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
      }
      return 0;
    }
  } else {
    if (HBLK_IS_FREE(hhdr)) {
      /* A pointer into a free block is never a valid object pointer. */
      GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
      return 0;
    } else {
      /* Genuine header: record it in the cache for later lookups. */
      hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
      hce -> hce_hdr = hhdr;
      return hhdr;
    }
  }
}
/* Routines to dynamically allocate collector data structures that will */
/* never be freed.                                                      */

/* First unused byte of the current scratch area.                       */
static ptr_t scratch_free_ptr = 0;

/* GC_scratch_last_end_ptr is end point of last obtained scratch area.  */
/* GC_scratch_end_ptr is end point of current scratch area.             */
/* Allocate bytes (rounded up to a granule multiple) of storage that    */
/* is never returned to the system.  Returns NULL on failure.           */
GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
{
  ptr_t result = scratch_free_ptr;
  size_t bytes_to_get;

  bytes = ROUNDUP_GRANULE_SIZE(bytes);
  for (;;) {
    /* Speculatively bump the free pointer; undone below if the     */
    /* current scratch area turns out to be too small.              */
    scratch_free_ptr += bytes;
    if ((word)scratch_free_ptr <= (word)GC_scratch_end_ptr) {
      /* Unallocated space of scratch buffer has enough size. */
      return result;
    }
    if (bytes >= MINHINCR * HBLKSIZE) {
      /* Large request: obtain it from the system directly instead  */
      /* of going through the scratch buffer.                       */
      bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
      result = (ptr_t)GET_MEM(bytes_to_get);
      GC_add_to_our_memory(result, bytes_to_get);
      /* Undo scratch free area pointer update; get memory directly. */
      scratch_free_ptr -= bytes;
      if (result != NULL) {
        /* Update end point of last obtained area (needed only */
        /* by GC_register_dynamic_libraries for some targets). */
        GC_scratch_last_end_ptr = result + bytes;
      }
      return result;
    }
    /* Replenish the scratch buffer with a standard-size chunk.     */
    bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(MINHINCR * HBLKSIZE);
                                        /* round up for safety */
    result = (ptr_t)GET_MEM(bytes_to_get);
    GC_add_to_our_memory(result, bytes_to_get);
    if (NULL == result) {
      WARN("Out of memory - trying to allocate requested amount"
           " (%" WARN_PRIdPTR " bytes)...\n", (word)bytes);
      scratch_free_ptr -= bytes; /* Undo free area pointer update */
      /* Last resort: try to obtain just the requested amount.      */
      bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
      result = (ptr_t)GET_MEM(bytes_to_get);
      GC_add_to_our_memory(result, bytes_to_get);
      return result;
    }
    /* Update scratch area pointers and retry. */
    scratch_free_ptr = result;
    GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
    GC_scratch_last_end_ptr = GC_scratch_end_ptr;
  }
}
  155. static hdr * hdr_free_list = 0;
  156. /* Return an uninitialized header */
  157. static hdr * alloc_hdr(void)
  158. {
  159. hdr * result;
  160. if (NULL == hdr_free_list) {
  161. result = (hdr *)GC_scratch_alloc(sizeof(hdr));
  162. } else {
  163. result = hdr_free_list;
  164. hdr_free_list = (hdr *) (result -> hb_next);
  165. }
  166. return(result);
  167. }
  168. GC_INLINE void free_hdr(hdr * hhdr)
  169. {
  170. hhdr -> hb_next = (struct hblk *) hdr_free_list;
  171. hdr_free_list = hhdr;
  172. }
#ifdef COUNT_HDR_CACHE_HITS
  /* Used for debugging/profiling (the symbols are externally visible). */
  /* Incremented via the HC_HIT/HC_MISS macros.                         */
  word GC_hdr_cache_hits = 0;
  word GC_hdr_cache_misses = 0;
#endif
  178. GC_INNER void GC_init_headers(void)
  179. {
  180. unsigned i;
  181. if (GC_all_nils == NULL)
  182. {
  183. GC_all_nils = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
  184. if (GC_all_nils == NULL) {
  185. GC_err_printf("Insufficient memory for GC_all_nils\n");
  186. EXIT();
  187. }
  188. }
  189. BZERO(GC_all_nils, sizeof(bottom_index));
  190. for (i = 0; i < TOP_SZ; i++) {
  191. GC_top_index[i] = GC_all_nils;
  192. }
  193. }
/* Make sure that there is a bottom level index block for address addr. */
/* Return FALSE on failure (out of memory).                             */
static GC_bool get_index(word addr)
{
  word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
                /* key identifying the bottom index covering addr */
  bottom_index * r;
  bottom_index * p;
  bottom_index ** prev;
  bottom_index *pi; /* old_p */
  word i;

  GC_ASSERT(I_HOLD_LOCK());
# ifdef HASH_TL
    /* Top level is a hash table: search the collision chain for hi. */
    i = TL_HASH(hi);
    pi = p = GC_top_index[i];
    while(p != GC_all_nils) {
      if (p -> key == hi) return(TRUE);
      p = p -> hash_link;
    }
# else
    /* Top level is a direct-mapped array indexed by hi. */
    if (GC_top_index[hi] != GC_all_nils)
      return TRUE;
    i = hi;
# endif
  r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
  if (EXPECT(NULL == r, FALSE))
    return FALSE;
  BZERO(r, sizeof(bottom_index));
  r -> key = hi;
# ifdef HASH_TL
    r -> hash_link = pi;        /* push onto the hash collision chain */
# endif
  /* Add it to the list of bottom indices */
  /* (kept sorted by key, doubly linked via asc_link/desc_link). */
  prev = &GC_all_bottom_indices; /* pointer to p */
  pi = 0; /* bottom_index preceding p */
  while ((p = *prev) != 0 && p -> key < hi) {
    pi = p;
    prev = &(p -> asc_link);
  }
  r -> desc_link = pi;
  if (0 == p) {
    /* Inserted at the tail: update the list-end pointer. */
    GC_all_bottom_indices_end = r;
  } else {
    p -> desc_link = r;
  }
  r -> asc_link = p;
  *prev = r;
  GC_top_index[i] = r;
  return(TRUE);
}
  243. /* Install a header for block h. */
  244. /* The header is uninitialized. */
  245. /* Returns the header or 0 on failure. */
  246. GC_INNER struct hblkhdr * GC_install_header(struct hblk *h)
  247. {
  248. hdr * result;
  249. if (!get_index((word) h)) return(0);
  250. result = alloc_hdr();
  251. if (result) {
  252. SET_HDR(h, result);
  253. # ifdef USE_MUNMAP
  254. result -> hb_last_reclaimed = (unsigned short)GC_gc_no;
  255. # endif
  256. }
  257. return(result);
  258. }
/* Set up forwarding counts for block h of size sz (in bytes).          */
/* Each non-first block of the object gets, in place of a header        */
/* pointer, a small integer giving its distance (in blocks, capped at   */
/* MAX_JUMP) back toward the object's first block; header-lookup code   */
/* recognizes these via IS_FORWARDING_ADDR_OR_NIL.                      */
/* Returns FALSE if a needed bottom index could not be allocated.       */
GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
  struct hblk * hbp;

  /* Make sure bottom indices exist for every region the object     */
  /* touches, including the one covering its last byte.             */
  for (hbp = h; (word)hbp < (word)h + sz; hbp += BOTTOM_SZ) {
    if (!get_index((word) hbp)) return(FALSE);
  }
  if (!get_index((word)h + sz - 1)) return(FALSE);
  for (hbp = h + 1; (word)hbp < (word)h + sz; hbp += 1) {
    word i = HBLK_PTR_DIFF(hbp, h);

    SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
  }
  return(TRUE);
}
  273. /* Remove the header for block h */
  274. GC_INNER void GC_remove_header(struct hblk *h)
  275. {
  276. hdr **ha;
  277. GET_HDR_ADDR(h, ha);
  278. free_hdr(*ha);
  279. *ha = 0;
  280. }
  281. /* Remove forwarding counts for h */
  282. GC_INNER void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
  283. {
  284. struct hblk * hbp;
  285. for (hbp = h+1; (word)hbp < (word)h + sz; hbp += 1) {
  286. SET_HDR(hbp, 0);
  287. }
  288. }
/* Apply fn to all allocated blocks.  It is the caller's responsibility */
/* to avoid data races during the function execution (e.g. by holding  */
/* the allocation lock).                                               */
void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
                            word client_data)
{
  signed_word j;
  bottom_index * index_p;

  /* Walk the ascending list of bottom indices; within each one,    */
  /* scan the slots from the top down so that forwarding counts     */
  /* (which point back toward an object's start) can be used to     */
  /* skip the interiors of large objects in one step.               */
  for (index_p = GC_all_bottom_indices; index_p != 0;
       index_p = index_p -> asc_link) {
    for (j = BOTTOM_SZ-1; j >= 0;) {
      if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
        if (!HBLK_IS_FREE(index_p->index[j])) {
          /* Reconstruct the block address from the index key and   */
          /* the slot number.                                       */
          (*fn)(((struct hblk *)
                  (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
                   << LOG_HBLKSIZE)),
                client_data);
        }
        j--;
      } else if (index_p->index[j] == 0) {
        /* Empty slot: no block here. */
        j--;
      } else {
        /* Forwarding count: jump over the large object's interior. */
        j -= (signed_word)(index_p->index[j]);
      }
    }
  }
}
/* Get the next valid (allocated, non-free) block whose address is  */
/* at least h.  Return 0 if there is none.                          */
GC_INNER struct hblk * GC_next_used_block(struct hblk *h)
{
  REGISTER bottom_index * bi;
  REGISTER word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
                /* slot of h within its bottom index */

  GC_ASSERT(I_HOLD_LOCK());
  GET_BI(h, bi);
  if (bi == GC_all_nils) {
      /* h is not covered by any bottom index: restart the search   */
      /* at the first index whose key is not below h's.             */
      REGISTER word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);

      bi = GC_all_bottom_indices;
      while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
      j = 0;
  }
  while(bi != 0) {
      while (j < BOTTOM_SZ) {
          hdr * hhdr = bi -> index[j];

          if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
              /* Nil or forwarding count: not a block start. */
              j++;
          } else {
              if (!HBLK_IS_FREE(hhdr)) {
                  return((struct hblk *)
                          (((bi -> key << LOG_BOTTOM_SZ) + j)
                           << LOG_HBLKSIZE));
              } else {
                  /* Skip the entire free block at once. */
                  j += divHBLKSZ(hhdr -> hb_sz);
              }
          }
      }
      /* Exhausted this bottom index: move to the next one up. */
      j = 0;
      bi = bi -> asc_link;
  }
  return(0);
}
/* Get the last (highest address) block whose address is        */
/* at most h.  Return 0 if there is none.                       */
/* Unlike GC_next_used_block, this may return a free block.     */
GC_INNER struct hblk * GC_prev_block(struct hblk *h)
{
  bottom_index * bi;
  signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
                /* slot of h within its bottom index */

  GC_ASSERT(I_HOLD_LOCK());
  GET_BI(h, bi);
  if (bi == GC_all_nils) {
      /* h is not covered by any bottom index: restart the search   */
      /* at the last index whose key is not above h's.              */
      word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);

      bi = GC_all_bottom_indices_end;
      while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
      j = BOTTOM_SZ - 1;
  }
  while(bi != 0) {
      while (j >= 0) {
          hdr * hhdr = bi -> index[j];

          if (0 == hhdr) {
              --j;
          } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
              /* Forwarding count: step back over the interior of   */
              /* a large object toward its first block.             */
              j -= (signed_word)hhdr;
          } else {
              return((struct hblk *)
                      (((bi -> key << LOG_BOTTOM_SZ) + j)
                       << LOG_HBLKSIZE));
          }
      }
      /* Exhausted this bottom index: move to the next one down. */
      j = BOTTOM_SZ - 1;
      bi = bi -> desc_link;
  }
  return(0);
}