malloc.c

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
#include "gc_inline.h" /* for GC_malloc_kind */

#include <stdio.h>
#include <string.h>

/* Allocate reclaim list for kind:      */
/* Return TRUE on success.              */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));

    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate a large block of size lb bytes.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
/* EXTRA_BYTES were already added to lb.        */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    lb = ROUNDUP_GRANULE_SIZE(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) {
      DCL_LOCK_STATE;
      UNLOCK(); /* just to unset GC_lock_holder */
      GC_init();
      LOCK();
    }
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc)
      GC_collect_a_little_inner((int)n_blocks);

    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
      h = GC_allochblk(lb, k, flags);
      retry = TRUE;
    }
    if (h == 0) {
      result = 0;
    } else {
      size_t total_bytes = n_blocks * HBLKSIZE;

      if (n_blocks > 1) {
        GC_large_allocd_bytes += total_bytes;
        if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
          GC_max_large_allocd_bytes = GC_large_allocd_bytes;
      }
      /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
      result = h -> hb_body;
    }
    return result;
}

/* Allocate a large block of size lb bytes.  Clear if appropriate.     */
/* EXTRA_BYTES were already added to lb.                               */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_alloc_large(lb, k, flags);
    if (result != NULL
          && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
        word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Fill in additional entries in GC_size_map, including the i-th one.  */
/* Note that a filled in section of the array ending at n always       */
/* has the length of at least n/4.                                     */
STATIC void GC_extend_size_map(size_t i)
{
    size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
    size_t granule_sz;
    size_t byte_sz = GRANULES_TO_BYTES(orig_granule_sz);
                        /* The size we try to preserve.         */
                        /* Close to i, unless this would        */
                        /* introduce too many distinct sizes.   */
    size_t smaller_than_i = byte_sz - (byte_sz >> 3);
    size_t low_limit;   /* The lowest indexed entry we initialize. */
    size_t number_of_objs;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(0 == GC_size_map[i]);
    if (0 == GC_size_map[smaller_than_i]) {
        low_limit = byte_sz - (byte_sz >> 2); /* much smaller than i */
        granule_sz = orig_granule_sz;
        while (GC_size_map[low_limit] != 0)
            low_limit++;
    } else {
        low_limit = smaller_than_i + 1;
        while (GC_size_map[low_limit] != 0)
            low_limit++;
        granule_sz = ROUNDED_UP_GRANULES(low_limit);
        granule_sz += granule_sz >> 3;
        if (granule_sz < orig_granule_sz)
            granule_sz = orig_granule_sz;
    }

    /* For these larger sizes, we use an even number of granules.      */
    /* This makes it easier to, e.g., construct a 16-byte-aligned      */
    /* allocator even if GRANULE_BYTES is 8.                           */
    granule_sz = (granule_sz + 1) & ~1;
    if (granule_sz > MAXOBJGRANULES)
        granule_sz = MAXOBJGRANULES;

    /* If we can fit the same number of larger objects in a block, do so. */
    number_of_objs = HBLK_GRANULES / granule_sz;
    GC_ASSERT(number_of_objs != 0);
    granule_sz = (HBLK_GRANULES / number_of_objs) & ~1;

    byte_sz = GRANULES_TO_BYTES(granule_sz) - EXTRA_BYTES;
                        /* We may need one extra byte; do not always   */
                        /* fill in GC_size_map[byte_sz].               */

    for (; low_limit <= byte_sz; low_limit++)
        GC_size_map[low_limit] = granule_sz;
}
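
/* Worked example (illustrative only; assumes GRANULE_BYTES is 16 and
 * EXTRA_BYTES is 0, which is one common configuration): a request for
 * 1000 bytes rounds up to 63 granules (1008 bytes); GC_extend_size_map
 * then prefers an even granule count and one whose objects evenly tile
 * HBLK_GRANULES, so requests near 1000 bytes all share one free list
 * instead of each distinct size getting its own.
 */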

/* Allocate lb bytes for an object of kind k.           */
/* Should not be used directly to allocate objects      */
/* that require special handling on allocation.         */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
          if (lg == 0) {
            if (!EXPECT(GC_is_initialized, TRUE)) {
              DCL_LOCK_STATE;
              UNLOCK(); /* just to unset GC_lock_holder */
              GC_init();
              LOCK();
              lg = GC_size_map[lb];
            }
            if (0 == lg) {
              GC_extend_size_map(lb);
              lg = GC_size_map[lb];
              GC_ASSERT(lg != 0);
            }
            /* Retry */
            opp = &(kind -> ok_freelist[lg]);
            op = *opp;
          }
          if (0 == op) {
            if (0 == kind -> ok_reclaim_list
                && !GC_alloc_reclaim_list(kind))
              return NULL;
            op = GC_allocobj(lg, k);
            if (0 == op)
              return NULL;
          }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        if (op != NULL)
            GC_bytes_allocd += lb;
    }
    return op;
}

#if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
    || !defined(GC_NO_FINALIZATION)
  /* Allocate a composite object of size n bytes.  The caller          */
  /* guarantees that pointers past the first page are not relevant.    */
  GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
  {
    word lb_adjusted;
    void * op;

    GC_ASSERT(I_HOLD_LOCK());
    if (lb <= HBLKSIZE)
        return GC_generic_malloc_inner(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    if (op != NULL)
        GC_bytes_allocd += lb_adjusted;
    return op;
  }
#endif

#ifdef GC_COLLECT_AT_MALLOC
  /* Parameter to force GC at every malloc of size greater or equal to */
  /* the given value.  This might be handy during debugging.           */
# if defined(CPPCHECK)
    size_t GC_dbg_collect_at_malloc_min_lb = 16*1024; /* e.g. */
# else
    size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
# endif
#endif

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    GC_ASSERT(k < MAXOBJKINDS);
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
          if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
          GC_bytes_allocd += lb_rounded;
        }
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_get_oom_fn())(lb));
    } else {
        return(result);
    }
}
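
/* Illustrative use (a sketch, not how this file itself calls it): the
 * public entry points funnel into GC_generic_malloc whenever the
 * per-size free list cannot satisfy a request, and custom object kinds
 * may call it directly, e.g. a large pointer-free buffer could be
 * obtained with
 *     void *buf = GC_generic_malloc(64 * 1024, PTRFREE);
 * followed by the usual check of the returned pointer against NULL
 * (the registered out-of-memory handler decides what failure returns).
 */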

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void *op;
        void **opp;
        size_t lg;
        DCL_LOCK_STATE;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            if (k == PTRFREE) {
                *opp = obj_link(op);
            } else {
                GC_ASSERT(0 == obj_link(op)
                          || ((word)obj_link(op)
                                <= (word)GC_greatest_plausible_heap_addr
                              && (word)obj_link(op)
                                >= (word)GC_least_plausible_heap_addr));
                *opp = obj_link(op);
                obj_link(op) = 0;
            }
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
            return op;
        }
        UNLOCK();
    }

    /* We make the GC_clear_stack() call a tail one, hoping to get more */
    /* of the stack.                                                     */
    return GC_clear_stack(GC_generic_malloc(lb, k));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t lb, int k)
  {
    return GC_malloc_kind_global(lb, k);
  }
#endif

#if !IL2CPP_ENABLE_WRITE_BARRIER_VALIDATION
/* Allocate lb bytes of atomic (pointer-free) data.     */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
{
    return GC_malloc_kind(lb, PTRFREE);
}

/* Allocate lb bytes of composite (pointerful) data.    */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
{
    return GC_malloc_kind(lb, NORMAL);
}
#endif
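
/* Client-side sketch (illustrative only; the struct and sizes are not
 * from this file): GC_malloc is for objects that may contain pointers
 * into the collected heap, while GC_malloc_atomic is for data the
 * collector never needs to scan, e.g.
 *     struct node { struct node *next; double value; };
 *     struct node *n = GC_malloc(sizeof *n);              scanned
 *     double *buf = GC_malloc_atomic(1024 * sizeof *buf); never scanned
 * Using the atomic variant for large pointer-free buffers avoids
 * retaining garbage through integer data that merely looks like a pointer.
 */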

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
                                                        size_t lb, int k)
{
    void *op;
    DCL_LOCK_STATE;

    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void **opp;
        size_t lg;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            /* Mark bit was already set on free list.  It will be  */
            /* cleared only temporarily during a collection, as a  */
            /* result of the normal free list mark bit clearing.   */
            GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = GC_generic_malloc(lb, k);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
    } else {
        hdr * hhdr;

        op = GC_generic_malloc(lb, k);
        if (NULL == op)
            return NULL;

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR(op);
        /* We don't need the lock here, since we have an undisguised  */
        /* pointer.  We do need to hold the lock while we adjust      */
        /* mark bits.                                                 */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case    */
                /* because the counter could be updated before locking. */
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
    }
    return op;
}

#if !IL2CPP_ENABLE_WRITE_BARRIER_VALIDATION
/* Allocate lb bytes of pointerful, traced, but not collectible data.  */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
}
#endif

#ifdef GC_ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectible data.  */
  /* This is normally roughly equivalent to the system malloc.         */
  /* But it may be useful if malloc is redefined.                      */
  GC_API GC_ATTR_MALLOC void * GC_CALL
        GC_malloc_atomic_uncollectable(size_t lb)
  {
    return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
  }
#endif /* GC_ATOMIC_UNCOLLECTABLE */
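
/* Illustrative example (not from this file): uncollectible objects are
 * traced for pointers but never reclaimed automatically, so they can
 * serve as long-lived roots that keep ordinary GC objects alive:
 *     struct registry *r = GC_malloc_uncollectable(sizeof *r);
 *     r->entries = GC_malloc(64 * sizeof(void *));   kept alive via r
 *     ...
 *     GC_free(r);    must be freed explicitly when no longer needed
 */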

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

# ifndef MSWINCE
#   include <errno.h>
# endif

  /* Avoid unnecessary nested procedure calls here, by #defining some   */
  /* malloc replacements.  Otherwise we end up saving a meaningless     */
  /* return address in the object.  It also speeds things up, but it is */
  /* admittedly quite ugly.                                             */
# define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)

# if defined(CPPCHECK)
#   define REDIRECT_MALLOC_F GC_malloc /* e.g. */
# else
#   define REDIRECT_MALLOC_F REDIRECT_MALLOC
# endif

  void * malloc(size_t lb)
  {
    /* It might help to manually inline the GC_malloc call here.       */
    /* But any decent compiler should reduce the extra procedure call  */
    /* to at most a jump instruction in this case.                     */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /* Thread initialization can call malloc before we are ready     */
      /* for it.  It is not clear that this is enough to help matters. */
      /* The thread implementation may well call malloc at other       */
      /* inopportune times.                                            */
      if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
#   endif
    return (void *)REDIRECT_MALLOC_F(lb);
  }
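
/* Build-time sketch: this whole block is only compiled when the
 * collector is configured to intercept the C allocator, typically by
 * defining something like
 *     -DREDIRECT_MALLOC=GC_malloc_uncollectable -DREDIRECT_FREE=GC_free
 * (or REDIRECT_MALLOC=GC_malloc) on the compiler command line, so that
 * ordinary malloc()/free() calls in the program and its libraries are
 * served from the garbage-collected heap.
 */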

# if defined(GC_LINUX_THREADS)
    STATIC ptr_t GC_libpthread_start = 0;
    STATIC ptr_t GC_libpthread_end = 0;
    STATIC ptr_t GC_libld_start = 0;
    STATIC ptr_t GC_libld_end = 0;

    STATIC void GC_init_lib_bounds(void)
    {
      IF_CANCEL(int cancel_state;)

      if (GC_libpthread_start != 0) return;
      DISABLE_CANCEL(cancel_state);
      GC_init(); /* if not called yet */
      if (!GC_text_mapping("libpthread-",
                           &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread,     */
        /* so we don't abort.  Perhaps we should.                      */
        /* Generate message only once:                                 */
        GC_libpthread_start = (ptr_t)1;
      }
      if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
      }
      RESTORE_CANCEL(cancel_state);
    }
# endif /* GC_LINUX_THREADS */

  void * calloc(size_t n, size_t lb)
  {
    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
      return (*GC_get_oom_fn())(GC_SIZE_MAX); /* n*lb overflow */
#   if defined(GC_LINUX_THREADS)
      /* libpthread allocated some memory that is only pointed to by   */
      /* mmapped thread stacks.  Make sure it is not collectible.      */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);

        /* This test does not need to ensure memory visibility, since  */
        /* the bounds will be set when/if we create another thread.    */
        if (!EXPECT(lib_bounds_set, TRUE)) {
          GC_init_lib_bounds();
          lib_bounds_set = TRUE;
        }
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end))
          return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
          /* The two ranges are actually usually adjacent, so there may */
          /* be a way to speed this up.                                 */
      }
#   endif
    return (void *)REDIRECT_MALLOC_F(n * lb);
  }
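
/* Note on the overflow test above (explanatory only): if both n and lb
 * are at most GC_SQRT_SIZE_MAX, their product cannot exceed GC_SIZE_MAX,
 * so the cheap (lb | n) comparison lets the common case skip the
 * division.  For example, with a 32-bit size_t (GC_SQRT_SIZE_MAX around
 * 0xFFFF), a call such as calloc(0x10000, 0x10000) fails the fast test,
 * and the exact check n > GC_SIZE_MAX / lb then reports the 4 GiB
 * product as an overflow instead of silently wrapping.
 */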

# ifndef strdup
    char *strdup(const char *s)
    {
      size_t lb = strlen(s) + 1;
      char *result = (char *)REDIRECT_MALLOC_F(lb);

      if (result == 0) {
        errno = ENOMEM;
        return 0;
      }
      BCOPY(s, result, lb);
      return result;
    }
# endif /* !defined(strdup) */
  /* If strdup is macro defined, we assume that it actually calls malloc, */
  /* and thus the right thing will happen even without overriding it.     */
  /* This seems to be true on most Linux systems.                         */

# ifndef strndup
    /* This is similar to strdup().     */
    char *strndup(const char *str, size_t size)
    {
      char *copy;
      size_t len = strlen(str);

      if (len > size)
        len = size;
      copy = (char *)REDIRECT_MALLOC_F(len + 1);
      if (copy == NULL) {
        errno = ENOMEM;
        return NULL;
      }
      if (EXPECT(len > 0, TRUE))
        BCOPY(str, copy, len);
      copy[len] = '\0';
      return copy;
    }
# endif /* !strndup */

# undef GC_debug_malloc_replacement
#endif /* REDIRECT_MALLOC */

#if !IL2CPP_ENABLE_WRITE_BARRIER_VALIDATION
/* Explicitly deallocate an object p.   */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* In bytes */
    size_t ngranules;   /* sz in granules */
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_free(%p) after GC #%lu\n",
                    p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
         || defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
        /* This might be called indirectly by GC_print_callers to free */
        /* the result of backtrace_symbols.                            */
        /* For Solaris, we have to redirect malloc calls during        */
        /* initialization.  For the others, this seems to happen       */
        /* implicitly.                                                 */
        /* Don't try to deallocate that memory.                        */
        if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = (size_t)hhdr->hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        void **flh;

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
                /* It's unnecessary to clear the mark bit.  If the      */
                /* object is reallocated, it doesn't matter.  Otherwise */
                /* the collector will do it, since it's on a free list. */
        if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
          GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
#endif
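
/* Usage note (illustrative, not from this file): GC_free is optional;
 * the collector reclaims unreachable objects on its own.  Calling it
 * only makes sense for pointers obtained from the collector's own
 * allocators that are known to be dead, e.g.
 *     char *tmp = GC_malloc_atomic(256);
 *     ... use tmp ...
 *     GC_free(tmp);    returns the object to its size-class free list
 * Passing a pointer that did not come from the GC heap is undefined.
 */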

/* Explicitly deallocate an object p when we already hold lock.        */
/* Only used for internally allocated objects, so we can take some     */
/* shortcuts.                                                          */
#ifdef THREADS
  GC_INNER void GC_free_inner(void * p)
  {
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* bytes */
    size_t ngranules;   /* sz in granules */
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = (size_t)hhdr->hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        void ** flh;

        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
          GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
  }
#endif /* THREADS */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif

#if defined(REDIRECT_FREE) && !defined(REDIRECT_MALLOC_IN_HEADER)

# if defined(CPPCHECK)
#   define REDIRECT_FREE_F GC_free /* e.g. */
# else
#   define REDIRECT_FREE_F REDIRECT_FREE
# endif

  void free(void * p)
  {
#   ifndef IGNORE_FREE
#     if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
        /* Don't bother with initialization checks.  If nothing has    */
        /* been initialized, the check fails, and that's safe, since   */
        /* we have not allocated any uncollectible objects either.     */
        ptr_t caller = (ptr_t)__builtin_return_address(0);

        /* This test does not need to ensure memory visibility, since  */
        /* the bounds will be set when/if we create another thread.    */
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end)) {
          GC_free(p);
          return;
        }
#     endif
      REDIRECT_FREE_F(p);
#   endif
  }
#endif /* REDIRECT_FREE */