 #include "mmtk_julia.h"
 #include "mmtk.h"
+#include "gc-mmtk.h"
 #include "mmtk_julia_types.h"
 #include <stdbool.h>
 #include <stddef.h>
-#include "gc.h"
+#include "gc-common.h"
+#include "threading.h"

 extern int64_t perm_scanned_bytes;
-extern gc_heapstatus_t gc_heap_stats;
 extern void run_finalizer(jl_task_t *ct, void *o, void *ff);
 extern int gc_n_threads;
 extern jl_ptls_t *gc_all_tls_states;
 extern jl_value_t *cmpswap_names JL_GLOBALLY_ROOTED;
 extern jl_genericmemory_t *jl_global_roots_list JL_GLOBALLY_ROOTED;
 extern jl_genericmemory_t *jl_global_roots_keyset JL_GLOBALLY_ROOTED;
 extern jl_typename_t *jl_array_typename JL_GLOBALLY_ROOTED;
-extern void jl_gc_free_memory(jl_value_t *v, int isaligned);
 extern long BI_METADATA_START_ALIGNED_DOWN;
 extern long BI_METADATA_END_ALIGNED_UP;
 extern uint64_t finalizer_rngState[JL_RNG_SIZE];
 extern const unsigned pool_sizes[];
-extern void mmtk_store_obj_size_c(void *obj, size_t size);
-extern void jl_gc_free_array(jl_array_t *a);
 extern size_t mmtk_get_obj_size(void *obj);
 extern void jl_rng_split(uint64_t to[JL_RNG_SIZE], uint64_t from[JL_RNG_SIZE]);
 extern void _jl_free_stack(jl_ptls_t ptls, void *stkbuf, size_t bufsz);
@@ -29,6 +27,8 @@ extern jl_mutex_t finalizers_lock;
 extern void jl_gc_wait_for_the_world(jl_ptls_t *gc_all_tls_states, int gc_n_threads);
 extern void mmtk_block_thread_for_gc(void);
 extern int64_t live_bytes;
+extern void jl_throw_out_of_memory_error(void);
+extern uint32_t jl_get_gc_disable_counter(void);


 extern void *new_mutator_iterator(void);
@@ -46,73 +46,32 @@ JL_DLLEXPORT void (jl_mmtk_harness_end)(void)
     mmtk_harness_end();
 }

-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int osize, size_t align, void *ty)
+// This is used in mmtk_sweep_malloced_memory. It is slightly different from
+// jl_gc_free_memory in gc-stock.c, since the stock GC also updates the global
+// variable gc_heap_stats (which is specific to the stock GC).
+static void jl_gc_free_memory(jl_value_t *v, int isaligned) JL_NOTSAFEPOINT
 {
-    // safepoint
-    jl_gc_safepoint_(ptls);
-
-    jl_value_t *v;
-    if ((uintptr_t)ty != jl_buff_tag) {
-        // v needs to be 16 byte aligned, therefore v_tagged needs to be offset accordingly to consider the size of header
-        jl_taggedvalue_t *v_tagged = (jl_taggedvalue_t*)mmtk_immix_alloc_fast(&ptls->mmtk_mutator, LLT_ALIGN(osize, align), align, sizeof(jl_taggedvalue_t));
-        v = jl_valueof(v_tagged);
-        mmtk_immix_post_alloc_fast(&ptls->mmtk_mutator, v, LLT_ALIGN(osize, align));
-    } else {
-        // allocating an extra word to store the size of buffer objects
-        jl_taggedvalue_t *v_tagged = (jl_taggedvalue_t*)mmtk_immix_alloc_fast(&ptls->mmtk_mutator, LLT_ALIGN(osize + sizeof(jl_taggedvalue_t), align), align, 0);
-        jl_value_t *v_tagged_aligned = ((jl_value_t*)((char*)(v_tagged) + sizeof(jl_taggedvalue_t)));
-        v = jl_valueof(v_tagged_aligned);
-        mmtk_store_obj_size_c(v, LLT_ALIGN(osize + sizeof(jl_taggedvalue_t), align));
-        mmtk_immix_post_alloc_fast(&ptls->mmtk_mutator, v, LLT_ALIGN(osize + sizeof(jl_taggedvalue_t), align));
-    }
-
-    ptls->gc_tls.gc_num.allocd += osize;
-    ptls->gc_tls.gc_num.poolalloc++;
-
-    return v;
-}
-
-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t sz)
-{
-    // safepoint
-    jl_gc_safepoint_(ptls);
-
-    size_t offs = offsetof(bigval_t, header);
-    assert(sz >= sizeof(jl_taggedvalue_t) && "sz must include tag");
-    static_assert(offsetof(bigval_t, header) >= sizeof(void*), "Empty bigval header?");
-    static_assert(sizeof(bigval_t) % JL_HEAP_ALIGNMENT == 0, "");
-    size_t allocsz = LLT_ALIGN(sz + offs, JL_CACHE_BYTE_ALIGNMENT);
-    if (allocsz < sz) { // overflow in adding offs, size was "negative"
-        assert(0 && "Error when allocating big object");
-        jl_throw(jl_memory_exception);
-    }
-
-    bigval_t *v = (bigval_t*)mmtk_alloc_large(&ptls->mmtk_mutator, allocsz, JL_CACHE_BYTE_ALIGNMENT, 0, 2);
-
-    if (v == NULL) {
-        assert(0 && "Allocation failed");
-        jl_throw(jl_memory_exception);
-    }
-    v->sz = allocsz;
-
-    ptls->gc_tls.gc_num.allocd += allocsz;
-    ptls->gc_tls.gc_num.bigalloc++;
-
-    jl_value_t *result = jl_valueof(&v->header);
-    mmtk_post_alloc(&ptls->mmtk_mutator, result, allocsz, 2);
-
-    return result;
+    assert(jl_is_genericmemory(v));
+    jl_genericmemory_t *m = (jl_genericmemory_t*)v;
+    assert(jl_genericmemory_how(m) == 1 || jl_genericmemory_how(m) == 2);
+    char *d = (char*)m->ptr;
+    if (isaligned)
+        jl_free_aligned(d);
+    else
+        free(d);
+    gc_num.freed += jl_genericmemory_nbytes(m);
+    gc_num.freecall++;
 }
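For contrast, here is a minimal sketch of the extra bookkeeping the stock GC's version performs and the MMTk version above omits. This is illustration only, not part of the patch: the gc_heap_stats.heap_size field and its atomic accessors are assumed from gc-stock.c.

static void stock_jl_gc_free_memory_sketch(jl_value_t *v, int isaligned) JL_NOTSAFEPOINT
{
    jl_genericmemory_t *m = (jl_genericmemory_t*)v;
    char *d = (char*)m->ptr;
    size_t freed = jl_genericmemory_nbytes(m);
    if (isaligned)
        jl_free_aligned(d);
    else
        free(d);
    // stock-GC-only step: shrink the global heap counter, which MMTk does not maintain
    // (gc_heap_stats.heap_size is assumed here; MMTk tracks heap usage internally instead)
    jl_atomic_store_relaxed(&gc_heap_stats.heap_size,
                            jl_atomic_load_relaxed(&gc_heap_stats.heap_size) - freed);
    gc_num.freed += freed;
    gc_num.freecall++;
}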

 static void mmtk_sweep_malloced_memory(void) JL_NOTSAFEPOINT
 {
     void *iter = new_mutator_iterator();
     jl_ptls_t ptls2 = get_next_mutator_tls(iter);
     while (ptls2 != NULL) {
-        mallocarray_t *ma = ptls2->gc_tls.heap.mallocarrays;
-        mallocarray_t **pma = &ptls2->gc_tls.heap.mallocarrays;
+        mallocmemory_t *ma = ptls2->gc_tls_common.heap.mallocarrays;
+        mallocmemory_t **pma = &ptls2->gc_tls_common.heap.mallocarrays;
         while (ma != NULL) {
-            mallocarray_t *nxt = ma->next;
+            mallocmemory_t *nxt = ma->next;
             jl_value_t *a = (jl_value_t*)((uintptr_t)ma->a & ~1);
             if (!mmtk_object_is_managed_by_mmtk(a)) {
                 pma = &ma->next;
@@ -121,16 +80,16 @@ static void mmtk_sweep_malloced_memory(void) JL_NOTSAFEPOINT
             }
             if (mmtk_is_live_object(a)) {
                 // if the array has been forwarded, the reference needs to be updated
-                jl_value_t *maybe_forwarded = (jl_value_t*)mmtk_get_possibly_forwared(ma->a);
+                jl_genericmemory_t *maybe_forwarded = (jl_genericmemory_t*)mmtk_get_possibly_forwared(ma->a);
                 ma->a = maybe_forwarded;
                 pma = &ma->next;
             }
             else {
                 *pma = nxt;
                 int isaligned = (uintptr_t)ma->a & 1;
                 jl_gc_free_memory(a, isaligned);
-                ma->next = ptls2->gc_tls.heap.mafreelist;
-                ptls2->gc_tls.heap.mafreelist = ma;
+                ma->next = ptls2->gc_tls_common.heap.mafreelist;
+                ptls2->gc_tls_common.heap.mafreelist = ma;
             }
             ma = nxt;
         }
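The sweep above strips a tag with `& ~1` and re-reads it with `& 1`: bit 0 of `ma->a` records whether the buffer was allocated with an aligned allocator. As a rough, illustrative sketch of the producer side (the function name and the list-push sequence are assumed for illustration; the real recording code lives in Julia's gc-common.c and is not part of this patch):

// Illustrative sketch only: how a malloc-backed genericmemory could be pushed
// onto the per-thread mallocarrays list that mmtk_sweep_malloced_memory walks.
static void record_malloced_genericmemory_sketch(jl_ptls_t ptls, jl_genericmemory_t *m, int isaligned)
{
    mallocmemory_t *ma = ptls->gc_tls_common.heap.mafreelist;   // reuse a free node if available
    if (ma == NULL) {
        ma = (mallocmemory_t*)malloc(sizeof(mallocmemory_t));
        if (ma == NULL)
            abort(); // out of memory while tracking an allocation
    }
    else {
        ptls->gc_tls_common.heap.mafreelist = ma->next;
    }
    ma->a = (jl_genericmemory_t*)((uintptr_t)m | !!isaligned);  // stash the aligned flag in bit 0
    ma->next = ptls->gc_tls_common.heap.mallocarrays;
    ptls->gc_tls_common.heap.mallocarrays = ma;
}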
@@ -160,8 +119,8 @@ JL_DLLEXPORT void jl_gc_prepare_to_collect(void)
     jl_task_t *ct = jl_current_task;
     jl_ptls_t ptls = ct->ptls;
     if (jl_atomic_load_acquire(&jl_gc_disable_counter)) {
-        size_t localbytes = jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + gc_num.interval;
-        jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, -(int64_t)gc_num.interval);
+        size_t localbytes = jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + gc_num.interval;
+        jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, -(int64_t)gc_num.interval);
         static_assert(sizeof(_Atomic(uint64_t)) == sizeof(gc_num.deferred_alloc), "");
         jl_atomic_fetch_add_relaxed((_Atomic(uint64_t)*)&gc_num.deferred_alloc, localbytes);
         return;
@@ -417,7 +376,7 @@ void mmtk_sweep_stack_pools(void)

         // free half of stacks that remain unused since last sweep
         for (int p = 0; p < JL_N_STACK_POOLS; p++) {
-            small_arraylist_t *al = &ptls2->gc_tls.heap.free_stacks[p];
+            small_arraylist_t *al = &ptls2->gc_tls_common.heap.free_stacks[p];
             size_t n_to_free;
             if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
                 n_to_free = al->len; // not alive yet or dead, so it does not need these anymore
@@ -439,10 +398,10 @@ void mmtk_sweep_stack_pools(void)
             }
         }
         if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
-            small_arraylist_free(ptls2->gc_tls.heap.free_stacks);
+            small_arraylist_free(ptls2->gc_tls_common.heap.free_stacks);
         }

-        small_arraylist_t *live_tasks = &ptls2->gc_tls.heap.live_tasks;
+        small_arraylist_t *live_tasks = &ptls2->gc_tls_common.heap.live_tasks;
         size_t n = 0;
         size_t ndel = 0;
         size_t l = live_tasks->len;
@@ -456,16 +415,16 @@ void mmtk_sweep_stack_pools(void)
                 live_tasks->items[n] = maybe_forwarded;
                 t = maybe_forwarded;
                 assert(jl_is_task(t));
-                if (t->stkbuf == NULL)
+                if (t->ctx.stkbuf == NULL)
                     ndel++; // jl_release_task_stack called
                 else
                     n++;
             } else {
                 ndel++;
-                void *stkbuf = t->stkbuf;
-                size_t bufsz = t->bufsz;
+                void *stkbuf = t->ctx.stkbuf;
+                size_t bufsz = t->ctx.bufsz;
                 if (stkbuf) {
-                    t->stkbuf = NULL;
+                    t->ctx.stkbuf = NULL;
                     _jl_free_stack(ptls2, stkbuf, bufsz);
                 }
 #ifdef _COMPILER_TSAN_ENABLED_
@@ -577,14 +536,15 @@ uintptr_t get_abi_structs_checksum_c(void) {
         ^ print_sizeof(mmtk_jl_task_t)
         ^ print_sizeof(mmtk_jl_weakref_t)
         ^ print_sizeof(mmtk_jl_tls_states_t)
-        ^ print_sizeof(mmtk_jl_thread_heap_t)
-        ^ print_sizeof(mmtk_jl_thread_gc_num_t);
+        ^ print_sizeof(mmtk_jl_thread_heap_common_t)
+        ^ print_sizeof(mmtk_jl_thread_gc_num_common_t);
 }

 Julia_Upcalls mmtk_upcalls = (Julia_Upcalls) {
     .scan_julia_exc_obj = scan_julia_exc_obj,
     .get_stackbase = get_stackbase,
     .jl_throw_out_of_memory_error = jl_throw_out_of_memory_error,
+    .jl_get_gc_disable_counter = jl_get_gc_disable_counter,
     .sweep_malloced_memory = mmtk_sweep_malloced_memory,
     .sweep_stack_pools = mmtk_sweep_stack_pools,
     .wait_in_a_safepoint = mmtk_wait_in_a_safepoint,