typd_mlc.c

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "private/gc_pmark.h"
#include "gc_inline.h" /* for GC_malloc_kind */

/*
 * Some simple primitives for allocation with explicit type information.
 * Simple objects are allocated such that they contain a GC_descr at the
 * end (in the last allocated word).  This descriptor may be a procedure
 * which then examines an extended descriptor passed as its environment.
 *
 * Arrays are treated as simple objects if they have sufficiently simple
 * structure.  Otherwise they are allocated from an array kind that supplies
 * a special mark procedure.  These arrays contain a pointer to a
 * complex_descriptor as their last word.
 * This is done because the environment field is too small, and the collector
 * must trace the complex_descriptor.
 *
 * Note that descriptors inside objects may appear cleared, if we encounter a
 * false reference to an object on a free list.  In the GC_descr case, this
 * is OK, since a 0 descriptor corresponds to examining no fields.
 * In the complex_descriptor case, we explicitly check for that case.
 *
 * MAJOR PARTS OF THIS CODE HAVE NOT BEEN TESTED AT ALL and are not testable,
 * since they are not accessible through the current interface.
 */
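
/* Illustrative only: a minimal sketch of how a client would typically     */
/* use the public interface implemented here (GC_make_descriptor and       */
/* GC_malloc_explicitly_typed), relying on the helper macros declared in   */
/* gc_typed.h (GC_BITMAP_SIZE, GC_WORD_OFFSET, GC_WORD_LEN, GC_set_bit).   */
/* The struct and function names below are hypothetical and are not part   */
/* of this file.                                                            */
#if 0
#include "gc_typed.h"

struct list_node {
    struct list_node *next;     /* pointer field: must be traced        */
    GC_word data;               /* not a pointer: should not be traced  */
};

static void *alloc_list_node(void)
{
    static GC_descr node_descr = 0;

    if (0 == node_descr) {
        GC_word bitmap[GC_BITMAP_SIZE(struct list_node)] = { 0 };

        /* Set the bit for the only pointer-containing word.   */
        GC_set_bit(bitmap, GC_WORD_OFFSET(struct list_node, next));
        node_descr = GC_make_descriptor(bitmap,
                                        GC_WORD_LEN(struct list_node));
    }
    /* The collector will trace only the words whose bits are set.     */
    return GC_malloc_explicitly_typed(sizeof(struct list_node), node_descr);
}
#endif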
#include "gc_typed.h"

#define TYPD_EXTRA_BYTES (sizeof(word) - EXTRA_BYTES)

STATIC int GC_explicit_kind = 0;
                        /* Object kind for objects with indirect        */
                        /* (possibly extended) descriptors.             */

STATIC int GC_array_kind = 0;
                        /* Object kind for objects with complex         */
                        /* descriptors and GC_array_mark_proc.          */

/* Extended descriptors.  GC_typed_mark_proc understands these.        */
/* These are used for simple objects that are larger than what         */
/* can be described by a BITMAP_BITS sized bitmap.                     */
typedef struct {
    word ed_bitmap;             /* lsb corresponds to first word.       */
    GC_bool ed_continued;       /* next entry is continuation.          */
} ext_descr;
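
/* For instance, on a target with 64-bit words, a simple object layout  */
/* spanning 100 words needs two ext_descr entries: the first holds the  */
/* bits for words 0..63 with ed_continued set, the second holds the     */
/* remaining 36 bits with ed_continued clear (see GC_add_ext_descriptor */
/* and GC_typed_mark_proc below).  This is only an illustration of the  */
/* layout, not code.                                                     */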
/* Array descriptors.  GC_array_mark_proc understands these.    */
/* We may eventually need to add provisions for headers and     */
/* trailers.  Hence we provide for tree structured descriptors, */
/* though we don't really use them currently.                   */

struct LeafDescriptor {         /* Describes simple array       */
    word ld_tag;
#   define LEAF_TAG 1
    size_t ld_size;             /* bytes per element            */
                                /* multiple of ALIGNMENT.       */
    size_t ld_nelements;        /* Number of elements.          */
    GC_descr ld_descriptor;     /* A simple length, bitmap,     */
                                /* or procedure descriptor.     */
} ld;

struct ComplexArrayDescriptor {
    word ad_tag;
#   define ARRAY_TAG 2
    size_t ad_nelements;
    union ComplexDescriptor * ad_element_descr;
} ad;

struct SequenceDescriptor {
    word sd_tag;
#   define SEQUENCE_TAG 3
    union ComplexDescriptor * sd_first;
    union ComplexDescriptor * sd_second;
} sd;

typedef union ComplexDescriptor {
    struct LeafDescriptor ld;
    struct ComplexArrayDescriptor ad;
    struct SequenceDescriptor sd;
} complex_descriptor;
#define TAG ad.ad_tag

STATIC ext_descr * GC_ext_descriptors = NULL;
                                        /* Points to array of extended  */
                                        /* descriptors.                 */

STATIC size_t GC_ed_size = 0;           /* Current size of above arrays.*/
#define ED_INITIAL_SIZE 100

STATIC size_t GC_avail_descr = 0;       /* Next available slot.         */

STATIC int GC_typed_mark_proc_index = 0; /* Indices of my mark          */
STATIC int GC_array_mark_proc_index = 0; /* procedures.                 */

#ifdef AO_HAVE_load_acquire
  STATIC volatile AO_t GC_explicit_typing_initialized = FALSE;
#else
  STATIC GC_bool GC_explicit_typing_initialized = FALSE;
#endif

STATIC void GC_push_typed_structures_proc(void)
{
    GC_PUSH_ALL_SYM(GC_ext_descriptors);
}

/* Add a multiword bitmap to the GC_ext_descriptors array.     */
/* Return the starting index on success, or -1 on failure.     */
/* The caller does not hold the allocation lock.               */
STATIC signed_word GC_add_ext_descriptor(const word * bm, word nbits)
{
    size_t nwords = divWORDSZ(nbits + WORDSZ-1);
    signed_word result;
    size_t i;
    word last_part;
    size_t extra_bits;
    DCL_LOCK_STATE;

    LOCK();
    while (GC_avail_descr + nwords >= GC_ed_size) {
        ext_descr * newExtD;
        size_t new_size;
        word ed_size = GC_ed_size;

        if (ed_size == 0) {
            GC_ASSERT((word)(&GC_ext_descriptors) % sizeof(word) == 0);
            GC_push_typed_structures = GC_push_typed_structures_proc;
            UNLOCK();
            new_size = ED_INITIAL_SIZE;
        } else {
            UNLOCK();
            new_size = 2 * ed_size;
            if (new_size > MAX_ENV) return(-1);
        }
        newExtD = (ext_descr *)GC_malloc_atomic(new_size * sizeof(ext_descr));
        if (NULL == newExtD)
            return -1;
        LOCK();
        if (ed_size == GC_ed_size) {
            if (GC_avail_descr != 0) {
                BCOPY(GC_ext_descriptors, newExtD,
                      GC_avail_descr * sizeof(ext_descr));
            }
            GC_ed_size = new_size;
            GC_ext_descriptors = newExtD;
        } /* else another thread already resized it in the meantime */
    }
    result = GC_avail_descr;
    for (i = 0; i < nwords-1; i++) {
        GC_ext_descriptors[result + i].ed_bitmap = bm[i];
        GC_ext_descriptors[result + i].ed_continued = TRUE;
    }
    last_part = bm[i];
    /* Clear irrelevant bits. */
    extra_bits = nwords * WORDSZ - nbits;
    last_part <<= extra_bits;
    last_part >>= extra_bits;
    GC_ext_descriptors[result + i].ed_bitmap = last_part;
    GC_ext_descriptors[result + i].ed_continued = FALSE;
    GC_avail_descr += nwords;
    UNLOCK();
    return(result);
}

/* Table of bitmap descriptors for n word long all pointer objects.    */
STATIC GC_descr GC_bm_table[WORDSZ/2];

/* Return a descriptor for the concatenation of 2 nwords long objects, */
/* each of which is described by descriptor.                           */
/* The result is known to be short enough to fit into a bitmap         */
/* descriptor.                                                         */
/* Descriptor is a GC_DS_LENGTH or GC_DS_BITMAP descriptor.            */
STATIC GC_descr GC_double_descr(GC_descr descriptor, word nwords)
{
    if ((descriptor & GC_DS_TAGS) == GC_DS_LENGTH) {
        descriptor = GC_bm_table[BYTES_TO_WORDS((word)descriptor)];
    }
    descriptor |= (descriptor & ~GC_DS_TAGS) >> nwords;
    return(descriptor);
}
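
/* Illustration (assuming 64-bit words, not code): an all-pointer object */
/* of 2 words has the length descriptor (16 | GC_DS_LENGTH);             */
/* GC_bm_table[2] turns it into a bitmap with the top two bits set, and  */
/* OR-ing in a copy shifted right by nwords = 2 sets the next two bits   */
/* as well, i.e. the result describes two such objects laid out back     */
/* to back.                                                               */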

STATIC complex_descriptor *
GC_make_sequence_descriptor(complex_descriptor *first,
                            complex_descriptor *second);

/* Build a descriptor for an array with nelements elements,     */
/* each of which can be described by a simple descriptor.       */
/* We try to optimize some common cases.                        */
/* If the result is COMPLEX, then a complex_descriptor* is      */
/* returned in *complex_d.                                      */
/* If the result is LEAF, then we build a LeafDescriptor in     */
/* the structure pointed to by leaf.                            */
/* The tag in the leaf structure is not set.                    */
/* If the result is SIMPLE, then a GC_descr is returned in      */
/* *simple_d.                                                   */
/* If the result is NO_MEM, then we failed to allocate the      */
/* descriptor.                                                  */
/* The implementation knows that GC_DS_LENGTH is 0.             */
/* *leaf, *complex_d, and *simple_d may be used as temporaries  */
/* during the construction.                                     */
#define COMPLEX 2
#define LEAF 1
#define SIMPLE 0
#define NO_MEM (-1)
STATIC int GC_make_array_descriptor(size_t nelements, size_t size,
                                    GC_descr descriptor, GC_descr *simple_d,
                                    complex_descriptor **complex_d,
                                    struct LeafDescriptor * leaf)
{
#   define OPT_THRESHOLD 50
        /* For larger arrays, we try to combine descriptors of adjacent */
        /* elements to speed up marking, and to reduce the amount of    */
        /* space needed on the mark stack.                              */
    if ((descriptor & GC_DS_TAGS) == GC_DS_LENGTH) {
        if (descriptor == (GC_descr)size) {
            *simple_d = nelements * descriptor;
            return(SIMPLE);
        } else if ((word)descriptor == 0) {
            *simple_d = (GC_descr)0;
            return(SIMPLE);
        }
    }
    if (nelements <= OPT_THRESHOLD) {
        if (nelements <= 1) {
            if (nelements == 1) {
                *simple_d = descriptor;
                return(SIMPLE);
            } else {
                *simple_d = (GC_descr)0;
                return(SIMPLE);
            }
        }
    } else if (size <= BITMAP_BITS/2
               && (descriptor & GC_DS_TAGS) != GC_DS_PROC
               && (size & (sizeof(word)-1)) == 0) {
        int result =
            GC_make_array_descriptor(nelements/2, 2*size,
                                     GC_double_descr(descriptor,
                                                     BYTES_TO_WORDS(size)),
                                     simple_d, complex_d, leaf);

        if ((nelements & 1) == 0) {
            return(result);
        } else {
            struct LeafDescriptor * one_element =
                (struct LeafDescriptor *)
                    GC_malloc_atomic(sizeof(struct LeafDescriptor));

            if (result == NO_MEM || one_element == 0) return(NO_MEM);
            one_element -> ld_tag = LEAF_TAG;
            one_element -> ld_size = size;
            one_element -> ld_nelements = 1;
            one_element -> ld_descriptor = descriptor;
            switch(result) {
              case SIMPLE:
                {
                  struct LeafDescriptor * beginning =
                    (struct LeafDescriptor *)
                        GC_malloc_atomic(sizeof(struct LeafDescriptor));

                  if (beginning == 0) return(NO_MEM);
                  beginning -> ld_tag = LEAF_TAG;
                  beginning -> ld_size = size;
                  beginning -> ld_nelements = 1;
                  beginning -> ld_descriptor = *simple_d;
                  *complex_d = GC_make_sequence_descriptor(
                                        (complex_descriptor *)beginning,
                                        (complex_descriptor *)one_element);
                  break;
                }
              case LEAF:
                {
                  struct LeafDescriptor * beginning =
                    (struct LeafDescriptor *)
                        GC_malloc_atomic(sizeof(struct LeafDescriptor));

                  if (beginning == 0) return(NO_MEM);
                  beginning -> ld_tag = LEAF_TAG;
                  beginning -> ld_size = leaf -> ld_size;
                  beginning -> ld_nelements = leaf -> ld_nelements;
                  beginning -> ld_descriptor = leaf -> ld_descriptor;
                  *complex_d = GC_make_sequence_descriptor(
                                        (complex_descriptor *)beginning,
                                        (complex_descriptor *)one_element);
                  break;
                }
              case COMPLEX:
                *complex_d = GC_make_sequence_descriptor(*complex_d,
                                        (complex_descriptor *)one_element);
                break;
            }
            return(COMPLEX);
        }
    }
    leaf -> ld_size = size;
    leaf -> ld_nelements = nelements;
    leaf -> ld_descriptor = descriptor;
    return(LEAF);
}
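
/* Two illustrative outcomes of the above (not code): an array of 1000   */
/* elements whose element descriptor is an all-pointer length descriptor */
/* equal to the element size collapses to one length descriptor covering */
/* the whole array (SIMPLE); an array of 51 small mixed pointer/non-     */
/* pointer elements is handled by recursively doubling the element       */
/* descriptor, with the leftover odd element appended via a sequence     */
/* descriptor (COMPLEX).                                                  */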

STATIC complex_descriptor *
GC_make_sequence_descriptor(complex_descriptor *first,
                            complex_descriptor *second)
{
    struct SequenceDescriptor * result =
        (struct SequenceDescriptor *)
                GC_malloc(sizeof(struct SequenceDescriptor));
                /* Can't result in overly conservative marking, since   */
                /* tags are very small integers.  Probably faster than  */
                /* maintaining type info.                               */

    if (result != 0) {
        result -> sd_tag = SEQUENCE_TAG;
        result -> sd_first = first;
        result -> sd_second = second;
        GC_dirty(result);
    }
    return((complex_descriptor *)result);
}

STATIC ptr_t * GC_eobjfreelist = NULL;

STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
                                mse * mark_stack_limit, word env);

STATIC mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
                                mse * mark_stack_limit, word env);

STATIC void GC_init_explicit_typing(void)
{
    unsigned i;

    GC_STATIC_ASSERT(sizeof(struct LeafDescriptor) % sizeof(word) == 0);
    /* Set up object kind with simple indirect descriptor. */
    GC_eobjfreelist = (ptr_t *)GC_new_free_list_inner();
    GC_explicit_kind = GC_new_kind_inner(
                            (void **)GC_eobjfreelist,
                            (WORDS_TO_BYTES((word)-1) | GC_DS_PER_OBJECT),
                            TRUE, TRUE);
                /* Descriptors are in the last word of the object. */
    GC_typed_mark_proc_index = GC_new_proc_inner(GC_typed_mark_proc);
    /* Set up object kind with array descriptor. */
    GC_array_mark_proc_index = GC_new_proc_inner(GC_array_mark_proc);
    GC_array_kind = GC_new_kind_inner(GC_new_free_list_inner(),
                            GC_MAKE_PROC(GC_array_mark_proc_index, 0),
                            FALSE, TRUE);
    GC_bm_table[0] = GC_DS_BITMAP;
    for (i = 1; i < WORDSZ/2; i++) {
        GC_bm_table[i] = (((word)-1) << (WORDSZ - i)) | GC_DS_BITMAP;
    }
}

STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
                                mse * mark_stack_limit, word env)
{
    word bm = GC_ext_descriptors[env].ed_bitmap;
    word * current_p = addr;
    word current;
    ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
    ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
    DECLARE_HDR_CACHE;

    INIT_HDR_CACHE;
    for (; bm != 0; bm >>= 1, current_p++) {
        if (bm & 1) {
            current = *current_p;
            FIXUP_POINTER(current);
            if (current >= (word)least_ha && current <= (word)greatest_ha) {
                PUSH_CONTENTS((ptr_t)current, mark_stack_ptr,
                              mark_stack_limit, (ptr_t)current_p);
            }
        }
    }
    if (GC_ext_descriptors[env].ed_continued) {
        /* Push an entry with the rest of the descriptor back onto the  */
        /* stack.  Thus we never do too much work at once.  Note that   */
        /* we also can't overflow the mark stack unless we actually     */
        /* mark something.                                              */
        mark_stack_ptr++;
        if ((word)mark_stack_ptr >= (word)mark_stack_limit) {
            mark_stack_ptr = GC_signal_mark_stack_overflow(mark_stack_ptr);
        }
        mark_stack_ptr -> mse_start = (ptr_t)(addr + WORDSZ);
        mark_stack_ptr -> mse_descr.w =
                        GC_MAKE_PROC(GC_typed_mark_proc_index, env + 1);
    }
    return(mark_stack_ptr);
}

/* Return the size of the object described by d.  It would be faster to */
/* store this directly, or to compute it as part of                     */
/* GC_push_complex_descriptor, but hopefully it doesn't matter.         */
STATIC word GC_descr_obj_size(complex_descriptor *d)
{
    switch(d -> TAG) {
      case LEAF_TAG:
        return(d -> ld.ld_nelements * d -> ld.ld_size);
      case ARRAY_TAG:
        return(d -> ad.ad_nelements
               * GC_descr_obj_size(d -> ad.ad_element_descr));
      case SEQUENCE_TAG:
        return(GC_descr_obj_size(d -> sd.sd_first)
               + GC_descr_obj_size(d -> sd.sd_second));
      default:
        ABORT_RET("Bad complex descriptor");
        return 0;
    }
}

/* Push descriptors for the object at addr with complex descriptor d   */
/* onto the mark stack.  Return 0 if the mark stack overflowed.        */
STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
                                        mse *msp, mse *msl)
{
    ptr_t current = (ptr_t)addr;
    word nelements;
    word sz;
    word i;

    switch(d -> TAG) {
      case LEAF_TAG:
        {
          GC_descr descr = d -> ld.ld_descriptor;

          nelements = d -> ld.ld_nelements;
          if (msl - msp <= (ptrdiff_t)nelements) return(0);
          sz = d -> ld.ld_size;
          for (i = 0; i < nelements; i++) {
              msp++;
              msp -> mse_start = current;
              msp -> mse_descr.w = descr;
              current += sz;
          }
          return(msp);
        }
      case ARRAY_TAG:
        {
          complex_descriptor *descr = d -> ad.ad_element_descr;

          nelements = d -> ad.ad_nelements;
          sz = GC_descr_obj_size(descr);
          for (i = 0; i < nelements; i++) {
              msp = GC_push_complex_descriptor((word *)current, descr,
                                               msp, msl);
              if (msp == 0) return(0);
              current += sz;
          }
          return(msp);
        }
      case SEQUENCE_TAG:
        {
          sz = GC_descr_obj_size(d -> sd.sd_first);
          msp = GC_push_complex_descriptor((word *)current, d -> sd.sd_first,
                                           msp, msl);
          if (msp == 0) return(0);
          current += sz;
          msp = GC_push_complex_descriptor((word *)current, d -> sd.sd_second,
                                           msp, msl);
          return(msp);
        }
      default:
        ABORT_RET("Bad complex descriptor");
        return 0;
    }
}

STATIC mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
                                mse * mark_stack_limit,
                                word env GC_ATTR_UNUSED)
{
    hdr * hhdr = HDR(addr);
    word sz = hhdr -> hb_sz;
    word nwords = BYTES_TO_WORDS(sz);
    complex_descriptor * descr = (complex_descriptor *)(addr[nwords-1]);
    mse * orig_mark_stack_ptr = mark_stack_ptr;
    mse * new_mark_stack_ptr;

    if (descr == 0) {
        /* Found a reference to a free list entry.  Ignore it. */
        return(orig_mark_stack_ptr);
    }
    /* In use counts were already updated when array descriptor was    */
    /* pushed.  Here we only replace it by subobject descriptors, so   */
    /* no update is necessary.                                         */
    new_mark_stack_ptr = GC_push_complex_descriptor(addr, descr,
                                                    mark_stack_ptr,
                                                    mark_stack_limit-1);
    if (new_mark_stack_ptr == 0) {
        /* Explicitly instruct Clang Static Analyzer that ptr is non-null. */
        if (NULL == mark_stack_ptr) ABORT("Bad mark_stack_ptr");

        /* Doesn't fit.  Conservatively push the whole array as a unit  */
        /* and request a mark stack expansion.                          */
        /* This cannot cause a mark stack overflow, since it replaces   */
        /* the original array entry.                                    */
#       ifdef PARALLEL_MARK
            /* We might be using a local_mark_stack in parallel mode. */
            if (GC_mark_stack + GC_mark_stack_size == mark_stack_limit)
#       endif
        {
            GC_mark_stack_too_small = TRUE;
        }
        new_mark_stack_ptr = orig_mark_stack_ptr + 1;
        new_mark_stack_ptr -> mse_start = (ptr_t)addr;
        new_mark_stack_ptr -> mse_descr.w = sz | GC_DS_LENGTH;
    } else {
        /* Push descriptor itself. */
        new_mark_stack_ptr++;
        new_mark_stack_ptr -> mse_start = (ptr_t)(addr + nwords - 1);
        new_mark_stack_ptr -> mse_descr.w = sizeof(word) | GC_DS_LENGTH;
    }
    return new_mark_stack_ptr;
}

GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word * bm, size_t len)
{
    signed_word last_set_bit = len - 1;
    GC_descr result;
    DCL_LOCK_STATE;

#   if defined(AO_HAVE_load_acquire) && defined(AO_HAVE_store_release)
      if (!EXPECT(AO_load_acquire(&GC_explicit_typing_initialized), TRUE)) {
        LOCK();
        if (!GC_explicit_typing_initialized) {
          GC_init_explicit_typing();
          AO_store_release(&GC_explicit_typing_initialized, TRUE);
        }
        UNLOCK();
      }
#   else
      LOCK();
      if (!EXPECT(GC_explicit_typing_initialized, TRUE)) {
        GC_init_explicit_typing();
        GC_explicit_typing_initialized = TRUE;
      }
      UNLOCK();
#   endif

    while (last_set_bit >= 0 && !GC_get_bit(bm, last_set_bit))
      last_set_bit--;
    if (last_set_bit < 0) return(0 /* no pointers */);

#   if ALIGNMENT == CPP_WORDSZ/8
    {
      signed_word i;

      for (i = 0; i < last_set_bit; i++) {
        if (!GC_get_bit(bm, i)) {
          break;
        }
      }
      if (i == last_set_bit) {
        /* An initial section contains all pointers.  Use length descriptor. */
        return (WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
      }
    }
#   endif
    if ((word)last_set_bit < BITMAP_BITS) {
        signed_word i;

        /* Hopefully the common case.                    */
        /* Build bitmap descriptor (with bits reversed). */
        result = SIGNB;
        for (i = last_set_bit - 1; i >= 0; i--) {
            result >>= 1;
            if (GC_get_bit(bm, i)) result |= SIGNB;
        }
        result |= GC_DS_BITMAP;
    } else {
        signed_word index = GC_add_ext_descriptor(bm, (word)last_set_bit + 1);

        if (index == -1) return(WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
                                        /* Out of memory: use conservative */
                                        /* approximation.                  */
        result = GC_MAKE_PROC(GC_typed_mark_proc_index, (word)index);
    }
    return result;
}
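
/* Worked example (64-bit words, not code): for a bitmap with bits 0 and */
/* 2 set and len = 3, the loop above produces a GC_DS_BITMAP descriptor  */
/* whose most significant bit (word 0) and third most significant bit    */
/* (word 2) are set; an all-pointer prefix instead yields a simple       */
/* GC_DS_LENGTH descriptor, and layouts too large for a bitmap fall back */
/* to an extended descriptor referenced through GC_MAKE_PROC.            */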

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_explicitly_typed(size_t lb,
                                                                GC_descr d)
{
    word *op;
    size_t lg;

    GC_ASSERT(GC_explicit_typing_initialized);
    lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
    op = (word *)GC_malloc_kind(lb, GC_explicit_kind);
    if (EXPECT(NULL == op, FALSE))
        return NULL;
    /* It is not safe to use GC_size_map[lb] to compute lg here as the  */
    /* former might be updated asynchronously.                          */
    lg = BYTES_TO_GRANULES(GC_size(op));
    op[GRANULES_TO_WORDS(lg) - 1] = d;
    GC_dirty(op + GRANULES_TO_WORDS(lg) - 1);
    return op;
}

/* We make the GC_clear_stack() call a tail one, hoping to get more of */
/* the stack.                                                          */
#define GENERAL_MALLOC_IOP(lb, k) \
                GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))

GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_malloc_explicitly_typed_ignore_off_page(size_t lb, GC_descr d)
{
    ptr_t op;
    size_t lg;
    DCL_LOCK_STATE;

    GC_ASSERT(GC_explicit_typing_initialized);
    lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        LOCK();
        lg = GC_size_map[lb];
        op = GC_eobjfreelist[lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
            if (0 == op) return 0;
            /* See the comment in GC_malloc_explicitly_typed.  */
            lg = BYTES_TO_GRANULES(GC_size(op));
        } else {
            GC_eobjfreelist[lg] = (ptr_t)obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
        }
    } else {
        op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
        if (NULL == op) return NULL;
        lg = BYTES_TO_GRANULES(GC_size(op));
    }
    ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
    GC_dirty(op + GRANULES_TO_WORDS(lg) - 1);
    return op;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n,
                                                        size_t lb, GC_descr d)
{
    word *op;
    size_t lg;
    GC_descr simple_descr;
    complex_descriptor *complex_descr;
    int descr_type;
    struct LeafDescriptor leaf;

    GC_ASSERT(GC_explicit_typing_initialized);
    descr_type = GC_make_array_descriptor((word)n, (word)lb, d, &simple_descr,
                                          &complex_descr, &leaf);
    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial check */
        && lb > 0 && n > GC_SIZE_MAX / lb)
      return (*GC_get_oom_fn())(GC_SIZE_MAX); /* n*lb overflow */
    lb *= n;
    switch(descr_type) {
      case NO_MEM: return(0);
      case SIMPLE:
        return GC_malloc_explicitly_typed(lb, simple_descr);
      case LEAF:
        lb = SIZET_SAT_ADD(lb,
                        sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES);
        break;
      case COMPLEX:
        lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
        break;
    }
    op = (word *)GC_malloc_kind(lb, GC_array_kind);
    if (EXPECT(NULL == op, FALSE))
        return NULL;
    lg = BYTES_TO_GRANULES(GC_size(op));
    if (descr_type == LEAF) {
        /* Set up the descriptor inside the object itself. */
        volatile struct LeafDescriptor * lp =
            (struct LeafDescriptor *)
                (op + GRANULES_TO_WORDS(lg)
                    - (BYTES_TO_WORDS(sizeof(struct LeafDescriptor)) + 1));

        lp -> ld_tag = LEAF_TAG;
        lp -> ld_size = leaf.ld_size;
        lp -> ld_nelements = leaf.ld_nelements;
        lp -> ld_descriptor = leaf.ld_descriptor;
        ((volatile word *)op)[GRANULES_TO_WORDS(lg) - 1] = (word)lp;
    } else {
#       ifndef GC_NO_FINALIZATION
            size_t lw = GRANULES_TO_WORDS(lg);

            op[lw - 1] = (word)complex_descr;
            GC_dirty(op + lw - 1);
            /* Make sure the descriptor is cleared once there is any   */
            /* danger it may have been collected.                      */
            if (EXPECT(GC_general_register_disappearing_link(
                                        (void **)(op + lw - 1), op)
                       == GC_NO_MEMORY, FALSE))
#       endif
        {
            /* Couldn't register it due to lack of memory.  Punt.  */
            return (*GC_get_oom_fn())(lb);
        }
    }
    return op;
}
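
/* Illustrative only (hypothetical names, not compiled):                  */
/*   void *arr = GC_calloc_explicitly_typed(20, sizeof(struct list_node), */
/*                                          node_descr);                  */
/* would allocate a collectable array of 20 nodes, each traced according  */
/* to a descriptor built as in the sketch near the top of this file.      */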