/* Ruby 4.1.0dev (2026-05-15 revision 4ec235e0b227d38426aa477e537ac397963c0ee8) */
/* mmtk.c */
1#include <pthread.h>
2#include <stdbool.h>
3
4#include "ruby/assert.h"
5#include "ruby/atomic.h"
6#include "ruby/debug.h"
7
8#include "gc/gc.h"
9#include "gc/gc_impl.h"
10#include "gc/mmtk/mmtk.h"
11
12#include "ccan/list/list.h"
13#include "darray.h"
14
15#ifdef __APPLE__
16#include <sys/sysctl.h>
17#endif
18
19#ifndef VM_CHECK_MODE
20# define VM_CHECK_MODE RUBY_DEBUG
21#endif
22
23// From ractor_core.h
24#ifndef RACTOR_CHECK_MODE
25# define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)
26#endif
27
28#if RACTOR_CHECK_MODE
29# define RVALUE_SUFFIX_SIZE sizeof(VALUE)
30void rb_ractor_setup_belonging(VALUE obj);
31#else
32# define RVALUE_SUFFIX_SIZE 0
33#endif
34
/* Per-VM GC state for the MMTk backend. */
struct objspace {
    bool measure_gc_time;  // when true, accumulate pause time into total_gc_time
    bool gc_stress;        // when true, request a GC before every allocation

    size_t gc_count;         // completed GC cycles
    size_t moving_gc_count;  // GC cycles that may have moved objects
    size_t total_gc_time;    // nanoseconds spent in GC pauses (if measured)
    size_t total_allocated_objects;

    st_table *finalizer_table;              // object => finalizer array
    struct MMTk_final_job *finalizer_jobs;  // pending dfree/finalize work
    rb_postponed_job_handle_t finalizer_postponed_job;  // runs gc_run_finalizers

    struct ccan_list_head ractor_caches;  // all live per-ractor caches
    unsigned long live_ractor_cache_count;

    // Stop-the-world handshake between the GC thread and mutators.
    pthread_mutex_t mutex;
    rb_atomic_t mutator_blocking_count;
    bool world_stopped;
    pthread_cond_t cond_world_stopped;  // broadcast when the world stops
    pthread_cond_t cond_world_started;  // broadcast when the world resumes
    size_t start_the_world_count;

    pthread_mutex_t event_hook_mutex;  // serializes FREEOBJ event hooks across GC workers

    // Set by a crashing GC thread; reported by a mutator via rb_bug.
    struct {
        bool gc_thread_crashed;
        char crash_msg[256];
    } crash_context;

    struct rb_gc_vm_context vm_context;

    unsigned int fork_hook_vm_lock_lev;
};
69
70#define OBJ_FREE_BUF_CAPACITY 128
71
73 struct ccan_list_node list_node;
74
75 MMTk_Mutator *mutator;
76 bool gc_mutator_p;
77
78 MMTk_BumpPointer *bump_pointer;
79
80 MMTk_ObjectReference obj_free_parallel_buf[OBJ_FREE_BUF_CAPACITY];
81 size_t obj_free_parallel_count;
82 MMTk_ObjectReference obj_free_non_parallel_buf[OBJ_FREE_BUF_CAPACITY];
83 size_t obj_free_non_parallel_count;
84};
85
87 struct MMTk_final_job *next;
88 enum {
89 MMTK_FINAL_JOB_DFREE,
90 MMTK_FINAL_JOB_FINALIZE,
91 } kind;
92 union {
93 struct {
94 void (*func)(void *);
95 void *data;
96 } dfree;
97 struct {
98 /* HACK: we store the object ID on the 0th element of this array. */
99 VALUE finalizer_array;
100 } finalize;
101 } as;
102};
103
#ifdef RB_THREAD_LOCAL_SPECIFIER
// TLS handed to each GC worker thread by MMTk (set in rb_mmtk_init_gc_worker_thread).
RB_THREAD_LOCAL_SPECIFIER struct MMTk_GCThreadTLS *rb_mmtk_gc_thread_tls;

// Object whose references are currently being traced; used only for
// diagnostics when marking encounters a T_NONE slot.
RB_THREAD_LOCAL_SPECIFIER VALUE marking_parent_object;
#else
# error We currently need language-supported TLS
#endif

#ifdef MMTK_DEBUG
# define MMTK_ASSERT(expr, ...) RUBY_ASSERT_ALWAYS(expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#else
# define MMTK_ASSERT(expr, ...) ((void)0)
#endif
117
118#include <pthread.h>
119
120static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
121
122static void
123rb_mmtk_init_gc_worker_thread(MMTk_VMWorkerThread gc_thread_tls)
124{
125 rb_mmtk_gc_thread_tls = gc_thread_tls;
126}
127
/* MMTk upcall: a mutator is any thread registered with the VM as a native
 * Ruby thread. */
static bool
rb_mmtk_is_mutator(void)
{
    return ruby_native_thread_p() != 0;
}
133
/* MMTk upcall, runs on the GC thread: block until the GC-coordinating
 * mutator (rb_mmtk_block_for_gc) announces that the world is stopped. */
static void
rb_mmtk_stop_the_world(void)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    // Loop guards against spurious condvar wakeups.
    while (!objspace->world_stopped) {
        pthread_cond_wait(&objspace->cond_world_stopped, &objspace->mutex);
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
152
/* MMTk upcall, runs on the GC thread: mark GC as finished, bump the cycle
 * counters, and wake every mutator parked in rb_mmtk_block_for_gc. */
static void
rb_mmtk_resume_mutators(bool current_gc_may_move)
{
    struct objspace *objspace = rb_gc_get_objspace();

    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    objspace->world_stopped = false;
    objspace->gc_count++;
    if (current_gc_may_move) objspace->moving_gc_count++;
    pthread_cond_broadcast(&objspace->cond_world_started);

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
}
172
173static void mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache);
174
/* MMTk upcall, runs on a mutator thread when a GC has been requested.
 * The first mutator to win the VM lock becomes the GC coordinator: it runs
 * the pre-GC hooks, stops the world, and waits for the GC thread to finish.
 * Every other mutator just waits on the VM lock and returns. */
static void
rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator)
{
    struct objspace *objspace = rb_gc_get_objspace();

    size_t starting_gc_count = objspace->gc_count;
    // Advertise that this thread is blocked (cannot run Ruby code) while it
    // contends for the VM lock.
    RUBY_ATOMIC_INC(objspace->mutator_blocking_count);
    int lock_lev = RB_GC_VM_LOCK();
    RUBY_ATOMIC_DEC(objspace->mutator_blocking_count);
    int err;
    if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err));
    }

    // If gc_count advanced while we waited for the lock, another mutator
    // already coordinated this cycle and there is nothing left to do.
    if (objspace->gc_count == starting_gc_count) {
        rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);

        rb_gc_initialize_vm_context(&objspace->vm_context);

        mutator->gc_mutator_p = true;

        struct timespec gc_start_time;
        if (objspace->measure_gc_time) {
            clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
        }

        rb_gc_save_machine_context();

        rb_gc_vm_barrier();

        // Hand all buffered obj_free candidates to MMTk before it sweeps.
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        objspace->world_stopped = true;

        // Release the GC thread waiting in rb_mmtk_stop_the_world.
        pthread_cond_broadcast(&objspace->cond_world_stopped);

        // Wait for GC end
        while (objspace->world_stopped) {
            pthread_cond_wait(&objspace->cond_world_started, &objspace->mutex);
        }

        // Surface a GC-thread panic on a thread that can call rb_bug.
        if (RB_UNLIKELY(objspace->crash_context.gc_thread_crashed)) {
            rb_bug("%s", objspace->crash_context.crash_msg);
        }

        if (objspace->measure_gc_time) {
            struct timespec gc_end_time;
            clock_gettime(CLOCK_MONOTONIC, &gc_end_time);

            objspace->total_gc_time +=
                (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
                (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
        }
    }

    if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) {
        rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err));
    }
    RB_GC_VM_UNLOCK(lock_lev);
}
238
/* MMTk upcall: let the VM unprotect JIT code pages before reference updating. */
static void
rb_mmtk_before_updating_jit_code(void)
{
    rb_gc_before_updating_jit_code();
}
244
/* MMTk upcall: let the VM re-protect JIT code pages after reference updating. */
static void
rb_mmtk_after_updating_jit_code(void)
{
    rb_gc_after_updating_jit_code();
}
250
251static size_t
252rb_mmtk_number_of_mutators(void)
253{
254 struct objspace *objspace = rb_gc_get_objspace();
255 return objspace->live_ractor_cache_count;
256}
257
258static void
259rb_mmtk_get_mutators(void (*visit_mutator)(MMTk_Mutator *mutator, void *data), void *data)
260{
261 struct objspace *objspace = rb_gc_get_objspace();
262 struct MMTk_ractor_cache *ractor_cache;
263
264 ccan_list_for_each(&objspace->ractor_caches, ractor_cache, list_node) {
265 visit_mutator(ractor_cache->mutator, data);
266 }
267}
268
269static void
270rb_mmtk_scan_gc_roots(void)
271{
272 struct objspace *objspace = rb_gc_get_objspace();
273
274 rb_gc_mark_roots(objspace, NULL);
275}
276
277static int
278pin_value(st_data_t key, st_data_t value, st_data_t data)
279{
280 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
281
282 return ST_CONTINUE;
283}
284
285static void
286rb_mmtk_scan_objspace(void)
287{
288 struct objspace *objspace = rb_gc_get_objspace();
289
290 if (objspace->finalizer_table != NULL) {
291 st_foreach(objspace->finalizer_table, pin_value, (st_data_t)objspace);
292 }
293
294 struct MMTk_final_job *job = objspace->finalizer_jobs;
295 while (job != NULL) {
296 switch (job->kind) {
297 case MMTK_FINAL_JOB_DFREE:
298 break;
299 case MMTK_FINAL_JOB_FINALIZE:
300 rb_gc_impl_mark(objspace, job->as.finalize.finalizer_array);
301 break;
302 default:
303 rb_bug("rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
304 }
305
306 job = job->next;
307 }
308}
309
310static void
311rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
312{
313 rb_gc_move_obj_during_marking((VALUE)from, (VALUE)to);
314}
315
316static void
317rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
318{
319 VALUE object = (VALUE)mmtk_object;
320
321 if (!RB_FL_TEST(object, RUBY_FL_WEAK_REFERENCE)) {
322 marking_parent_object = object;
323 rb_gc_update_object_references(rb_gc_get_objspace(), object);
324 marking_parent_object = 0;
325 }
326}
327
328static void
329rb_mmtk_call_gc_mark_children(MMTk_ObjectReference object)
330{
331 marking_parent_object = (VALUE)object;
332 rb_gc_mark_children(rb_gc_get_objspace(), (VALUE)object);
333 marking_parent_object = 0;
334}
335
/* MMTk upcall: process a weak-reference-holding object — clear dead weak
 * references, and when the GC may move objects, also forward its strong
 * references (which rb_mmtk_update_object_references skipped). */
static void
rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object, bool moving)
{
    VALUE object = (VALUE)mmtk_object;

    // Recorded so T_NONE diagnostics can report which object was being traced.
    marking_parent_object = object;

    rb_gc_handle_weak_references(object);

    if (moving) {
        rb_gc_update_object_references(rb_gc_get_objspace(), object);
    }

    marking_parent_object = 0;
}
351
/* MMTk upcall: free one dead object. May run on parallel GC worker threads,
 * so the FREEOBJ event hook is serialized with a dedicated mutex. */
static void
rb_mmtk_call_obj_free(MMTk_ObjectReference object)
{
    VALUE obj = (VALUE)object;
    struct objspace *objspace = rb_gc_get_objspace();

    if (RB_UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_FREEOBJ))) {
        pthread_mutex_lock(&objspace->event_hook_mutex);
        rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_FREEOBJ);
        pthread_mutex_unlock(&objspace->event_hook_mutex);
    }

    rb_gc_obj_free(objspace, obj);

#ifdef MMTK_DEBUG
    // Poison the slot so use-after-free is easier to catch in debug builds.
    memset((void *)obj, 0, rb_gc_impl_obj_slot_size(obj));
#endif
}
370
/* MMTk upcall: off-heap live bytes owned by the VM. Not tracked; report zero. */
static size_t
rb_mmtk_vm_live_bytes(void)
{
    return (size_t)0;
}
376
377static void
378make_final_job(struct objspace *objspace, VALUE obj, VALUE table)
379{
380 MMTK_ASSERT(RB_BUILTIN_TYPE(table) == T_ARRAY);
381
382 struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
383 job->next = objspace->finalizer_jobs;
384 job->kind = MMTK_FINAL_JOB_FINALIZE;
385 job->as.finalize.finalizer_array = table;
386
387 objspace->finalizer_jobs = job;
388}
389
/* st_foreach_with_replace callback for the finalizer table: forward the key
 * if its object moved, enqueue a finalize job and delete the entry if the
 * object died, otherwise leave the entry alone. */
static int
rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data, int error)
{
    MMTK_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
    MMTK_ASSERT(RB_BUILTIN_TYPE(value) == T_ARRAY);

    struct objspace *objspace = (struct objspace *)data;

    if (mmtk_is_reachable((MMTk_ObjectReference)key)) {
        VALUE new_key_location = rb_mmtk_call_object_closure((VALUE)key, false);

        MMTK_ASSERT(RB_FL_TEST(new_key_location, RUBY_FL_FINALIZE));

        // Object moved: rewrite the key via the replace callback.
        if (new_key_location != key) {
            return ST_REPLACE;
        }
    }
    else {
        // Object died: queue its finalizers to run outside of GC.
        make_final_job(objspace, (VALUE)key, (VALUE)value);

        rb_postponed_job_trigger(objspace->finalizer_postponed_job);

        return ST_DELETE;
    }

    return ST_CONTINUE;
}
417
418static int
419rb_mmtk_update_finalizer_table_replace_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
420{
421 *key = rb_mmtk_call_object_closure((VALUE)*key, false);
422
423 return ST_CONTINUE;
424}
425
426static void
427rb_mmtk_update_finalizer_table(void)
428{
429 struct objspace *objspace = rb_gc_get_objspace();
430
431 st_foreach_with_replace(
432 objspace->finalizer_table,
433 rb_mmtk_update_finalizer_table_i,
434 rb_mmtk_update_finalizer_table_replace_i,
435 (st_data_t)objspace
436 );
437}
438
439static int
440rb_mmtk_global_tables_count(void)
441{
442 return RB_GC_VM_WEAK_TABLE_COUNT;
443}
444
445static inline VALUE rb_mmtk_call_object_closure(VALUE obj, bool pin);
446
447static int
448rb_mmtk_update_global_tables_i(VALUE val, void *data)
449{
450 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
451 return ST_DELETE;
452 }
453
454 // TODO: check only if in moving GC
455 if (rb_mmtk_call_object_closure(val, false) != val) {
456 return ST_REPLACE;
457 }
458
459 return ST_CONTINUE;
460}
461
462static int
463rb_mmtk_update_global_tables_replace_i(VALUE *ptr, void *data)
464{
465 // TODO: cache the new location so we don't call rb_mmtk_call_object_closure twice
466 *ptr = rb_mmtk_call_object_closure(*ptr, false);
467
468 return ST_CONTINUE;
469}
470
471static void
472rb_mmtk_update_global_tables(int table, bool moving)
473{
474 MMTK_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);
475
476 rb_gc_vm_weak_table_foreach(
477 rb_mmtk_update_global_tables_i,
478 rb_mmtk_update_global_tables_replace_i,
479 NULL,
480 !moving,
481 (enum rb_gc_vm_weak_tables)table
482 );
483}
484
485static bool
486rb_mmtk_special_const_p(MMTk_ObjectReference object)
487{
488 VALUE obj = (VALUE)object;
489
490 return RB_SPECIAL_CONST_P(obj);
491}
492
/* Report a fatal error detected on a GC thread. rb_bug should run on a Ruby
 * thread, so stash the message in crash_context, resume the mutators (the
 * GC-coordinating mutator re-raises it in rb_mmtk_block_for_gc), then park
 * this thread briefly before aborting as a last resort. */
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 1, 2)
static void
rb_mmtk_gc_thread_bug(const char *msg, ...)
{
    struct objspace *objspace = rb_gc_get_objspace();

    objspace->crash_context.gc_thread_crashed = true;

    va_list args;
    va_start(args, msg);
    vsnprintf(objspace->crash_context.crash_msg, sizeof(objspace->crash_context.crash_msg), msg, args);
    va_end(args);

    fprintf(stderr, "-- GC thread backtrace "
                    "-------------------------------------------\n");
    rb_gc_print_backtrace();
    fprintf(stderr, "\n");

    rb_mmtk_resume_mutators(false);

    // Give a mutator time to pick up crash_context and abort first.
    sleep(5);

    rb_bug("rb_mmtk_gc_thread_bug");
}
517
/* Rust-panic hook for MMTk GC threads: route through the crash-context path. */
static void
rb_mmtk_gc_thread_panic_handler(void)
{
    rb_mmtk_gc_thread_bug("MMTk GC thread panicked");
}
523
/* Rust-panic hook for mutator threads: they can call rb_bug directly. */
static void
rb_mmtk_mutator_thread_panic_handler(void)
{
    rb_bug("Ruby mutator thread panicked");
}
529
// Bootup
// Upcall table handed to MMTk. Entries are positional: the order must match
// the MMTk_RubyUpcalls declaration in mmtk.h exactly.
MMTk_RubyUpcalls ruby_upcalls = {
    rb_mmtk_init_gc_worker_thread,
    rb_mmtk_is_mutator,
    rb_mmtk_stop_the_world,
    rb_mmtk_resume_mutators,
    rb_mmtk_block_for_gc,
    rb_mmtk_before_updating_jit_code,
    rb_mmtk_after_updating_jit_code,
    rb_mmtk_number_of_mutators,
    rb_mmtk_get_mutators,
    rb_mmtk_scan_gc_roots,
    rb_mmtk_scan_objspace,
    rb_mmtk_move_obj_during_marking,
    rb_mmtk_update_object_references,
    rb_mmtk_call_gc_mark_children,
    rb_mmtk_handle_weak_references,
    rb_mmtk_call_obj_free,
    rb_mmtk_vm_live_bytes,
    rb_mmtk_update_global_tables,
    rb_mmtk_global_tables_count,
    rb_mmtk_update_finalizer_table,
    rb_mmtk_special_const_p,
    rb_mmtk_mutator_thread_panic_handler,
    rb_mmtk_gc_thread_panic_handler,
};
556
// Use max 80% of the available memory by default for MMTk
#define RB_MMTK_HEAP_LIMIT_PERC 80
#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)

// How the heap bound is managed: grown on demand, or fixed at startup.
enum mmtk_heap_mode {
    RB_MMTK_DYNAMIC_HEAP,
    RB_MMTK_FIXED_HEAP
};
566
567MMTk_Builder *
568rb_mmtk_builder_init(void)
569{
570 MMTk_Builder *builder = mmtk_builder_default();
571 return builder;
572}
573
/* Allocate the objspace and initialize the MMTk binding (once, at boot). */
void *
rb_gc_impl_objspace_alloc(void)
{
    MMTk_Builder *builder = rb_mmtk_builder_init();
    MMTk_RubyBindingOptions binding_options = {
        .ractor_check_mode = RACTOR_CHECK_MODE != 0,
        .suffix_size = RVALUE_SUFFIX_SIZE,
    };
    mmtk_init_binding(builder, &binding_options, &ruby_upcalls);

    // NOTE(review): calloc result is unchecked; a NULL return would be
    // dereferenced by the caller — confirm the intended OOM policy here.
    return calloc(1, sizeof(struct objspace));
}
586
587static void gc_run_finalizers(void *data);
588
/* Initialize an objspace allocated by rb_gc_impl_objspace_alloc: finalizer
 * bookkeeping, the ractor-cache list, and the stop-the-world primitives. */
void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    objspace->measure_gc_time = true;

    objspace->finalizer_table = st_init_numtable();
    objspace->finalizer_postponed_job = rb_postponed_job_preregister(0, gc_run_finalizers, objspace);

    ccan_list_head_init(&objspace->ractor_caches);

    // Stop-the-world handshake primitives (see rb_mmtk_block_for_gc).
    objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
    objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
    objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;

    objspace->event_hook_mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
}
607
/* Release the objspace allocated with calloc in rb_gc_impl_objspace_alloc. */
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    free(objspace_ptr);
}
613
/* Create and register a per-ractor cache; the first cache created also
 * starts MMTk's collector threads. */
void *
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
{
    struct objspace *objspace = objspace_ptr;
    // The first ractor to get a cache boots MMTk's collection threads.
    if (objspace->live_ractor_cache_count == 0) {
        mmtk_initialize_collection(ractor);
    }
    objspace->live_ractor_cache_count++;

    // NOTE(review): calloc result is unchecked; NULL would crash just below.
    struct MMTk_ractor_cache *cache = calloc(1, sizeof(struct MMTk_ractor_cache));
    ccan_list_add(&objspace->ractor_caches, &cache->list_node);

    cache->mutator = mmtk_bind_mutator(cache);
    cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);

    return cache;
}
631
/* Tear down a per-ractor cache: flush buffered obj_free candidates and
 * destroy the MMTk mutator. */
void
rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache_ptr)
{
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *cache = cache_ptr;

    ccan_list_del(&cache->list_node);

    // Don't lose obj_free candidates still buffered for this ractor.
    mmtk_flush_obj_free_buffer(cache);

    // Under free-at-exit the main ractor's cache is freed too, so the count
    // may reach zero; otherwise the main ractor's cache must remain.
    if (ruby_free_at_exit_p()) {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 0);
    }
    else {
        MMTK_ASSERT(objspace->live_ractor_cache_count > 1);
    }

    objspace->live_ractor_cache_count--;

    // NOTE(review): `cache` itself is not freed here — confirm intentional.
    mmtk_destroy_mutator(cache->mutator);
}
653
/* No-op: this backend has no objspace-level tunables to apply here. */
void
rb_gc_impl_set_params(void *objspace_ptr)
{
}
655
656static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
657
#if SIZEOF_VALUE >= 8
#define MMTK_HEAP_COUNT 12
#define MMTK_MAX_OBJ_SIZE 1024
// Supported slot sizes in ascending order; zero-terminated so callers of
// rb_gc_impl_heap_sizes can walk the array without knowing its length.
static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 40, 64, 80, 96, 128, 160, 256, 512, 640, 768, MMTK_MAX_OBJ_SIZE, 0
};
#else
#define MMTK_HEAP_COUNT 5
#define MMTK_MAX_OBJ_SIZE 512
// 32-bit builds use a smaller size-class table.
static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
    32, 64, 128, 256, MMTK_MAX_OBJ_SIZE, 0
};
#endif
671
/* Ruby-level bootstrapping: publish GC::INTERNAL_CONSTANTS and define
 * compatibility stubs for methods this backend does not support. */
void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(SIZEOF_VALUE >= 8 ? 64 : 32));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(MMTK_HEAP_COUNT));
    // TODO: correctly set RVALUE_OLD_AGE when we have generational GC support
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), INT2FIX(0));
    OBJ_FREEZE(gc_constants);
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    // no-ops for compatibility
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);

    rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
    rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}
695
696size_t *
697rb_gc_impl_heap_sizes(void *objspace_ptr)
698{
699 return heap_sizes;
700}
701
702int
703rb_mmtk_obj_free_iter_wrapper(VALUE obj, void *data)
704{
705 struct objspace *objspace = data;
706
707 if (!RB_TYPE_P(obj, T_NONE)) {
708 rb_gc_obj_free_vm_weak_references(obj);
709 rb_gc_obj_free(objspace, obj);
710 }
711
712 return 0;
713}
714
715// Shutdown
716static void each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data);
717
718void
719rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
720{
721 mmtk_set_gc_enabled(false);
722 each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
723 mmtk_set_gc_enabled(true);
724}
725
726// GC
/* Explicit GC request (GC.start). immediate_mark/immediate_sweep/compact
 * have no meaning for MMTk; the request is always forced. */
void
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
{
    void *cache = rb_gc_get_ractor_newobj_cache();
    mmtk_handle_user_collection_request(cache, true, full_mark);
}
732
733bool
734rb_gc_impl_during_gc_p(void *objspace_ptr)
735{
736 struct objspace *objspace = objspace_ptr;
737 return objspace->world_stopped;
738}
739
740static void
741rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj, void *d)
742{
743 rb_gc_prepare_heap_process_object((VALUE)obj);
744}
745
746void
747rb_gc_impl_prepare_heap(void *objspace_ptr)
748{
749 mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
750}
751
752void
753rb_gc_impl_gc_enable(void *objspace_ptr)
754{
755 mmtk_set_gc_enabled(true);
756}
757
/* GC.disable: suppress automatic collections. `finish_current_gc` is not
 * applicable to this backend. */
void
rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
{
    mmtk_set_gc_enabled(false);
}
763
/* Whether automatic GC is currently permitted. */
bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
    return mmtk_gc_enabled_p();
}
769
770void
771rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
772{
773 struct objspace *objspace = objspace_ptr;
774
775 objspace->gc_stress = RTEST(flag);
776}
777
778VALUE
779rb_gc_impl_stress_get(void *objspace_ptr)
780{
781 struct objspace *objspace = objspace_ptr;
782
783 return objspace->gc_stress ? Qtrue : Qfalse;
784}
785
786VALUE
787rb_gc_impl_config_get(void *objspace_ptr)
788{
789 VALUE hash = rb_hash_new();
790
791 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_worker_count")), RB_ULONG2NUM(mmtk_worker_count()));
792 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_plan")), rb_str_new_cstr((const char *)mmtk_plan()));
793 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_mode")), rb_str_new_cstr((const char *)mmtk_heap_mode()));
794 size_t heap_min = mmtk_heap_min();
795 if (heap_min > 0) rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_min")), RB_ULONG2NUM(heap_min));
796 rb_hash_aset(hash, ID2SYM(rb_intern_const("mmtk_heap_max")), RB_ULONG2NUM(mmtk_heap_max()));
797
798 return hash;
799}
800
801void
802rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
803{
804 // TODO
805}
806
807struct rb_gc_vm_context *
808rb_gc_impl_get_vm_context(void *objspace_ptr)
809{
810 struct objspace *objspace = objspace_ptr;
811
812 return &objspace->vm_context;
813}
814
815// Object allocation
816
817static VALUE
818rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *ractor_cache, size_t size)
819{
820 MMTk_BumpPointer *bump_pointer = ractor_cache->bump_pointer;
821 if (bump_pointer == NULL) return 0;
822
823 uintptr_t new_cursor = bump_pointer->cursor + size;
824
825 if (new_cursor > bump_pointer->limit) {
826 return 0;
827 }
828 else {
829 VALUE obj = (VALUE)bump_pointer->cursor;
830 bump_pointer->cursor = new_cursor;
831 return obj;
832 }
833}
834
835static bool
836obj_can_parallel_free_p(VALUE obj)
837{
838 switch (RB_BUILTIN_TYPE(obj)) {
839 case T_ARRAY:
840 case T_BIGNUM:
841 case T_COMPLEX:
842 case T_FLOAT:
843 case T_HASH:
844 case T_OBJECT:
845 case T_RATIONAL:
846 case T_REGEXP:
847 case T_STRING:
848 case T_STRUCT:
849 case T_SYMBOL:
850 return true;
851 default:
852 return false;
853 }
854}
855
856static void
857mmtk_flush_obj_free_buffer(struct MMTk_ractor_cache *cache)
858{
859 if (cache->obj_free_parallel_count > 0) {
860 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
861 cache->obj_free_parallel_count, true);
862 cache->obj_free_parallel_count = 0;
863 }
864 if (cache->obj_free_non_parallel_count > 0) {
865 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
866 cache->obj_free_non_parallel_count, false);
867 cache->obj_free_non_parallel_count = 0;
868 }
869}
870
871static inline void
872mmtk_buffer_obj_free_candidate(struct MMTk_ractor_cache *cache, VALUE obj)
873{
874 if (obj_can_parallel_free_p(obj)) {
875 cache->obj_free_parallel_buf[cache->obj_free_parallel_count++] = (MMTk_ObjectReference)obj;
876 if (cache->obj_free_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
877 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
878 cache->obj_free_parallel_count, true);
879 cache->obj_free_parallel_count = 0;
880 }
881 }
882 else {
883 cache->obj_free_non_parallel_buf[cache->obj_free_non_parallel_count++] = (MMTk_ObjectReference)obj;
884 if (cache->obj_free_non_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
885 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
886 cache->obj_free_non_parallel_count, false);
887 cache->obj_free_non_parallel_count = 0;
888 }
889 }
890}
891
/* Allocate a new object.
 * Layout: [hidden size word (sizeof(VALUE))][payload (alloc_size)][suffix
 * (RVALUE_SUFFIX_SIZE)]. The returned VALUE points at the payload; the
 * hidden word just before it stores the payload size (read back by
 * rb_gc_impl_obj_slot_size). */
VALUE
rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
{
#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
    struct objspace *objspace = objspace_ptr;
    struct MMTk_ractor_cache *ractor_cache = cache_ptr;

    // Round the request up to the nearest supported size class.
    if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
    for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
        if (alloc_size == heap_sizes[i]) break;
        if (alloc_size < heap_sizes[i]) {
            alloc_size = heap_sizes[i];
            break;
        }
    }

    // Stress mode: request a (non-forced) GC before every allocation.
    if (objspace->gc_stress) {
        mmtk_handle_user_collection_request(ractor_cache, false, false);
    }

    // Layout: [hidden size header (sizeof(VALUE))][payload (alloc_size)][suffix (RVALUE_SUFFIX_SIZE)]
    alloc_size += sizeof(VALUE) + RVALUE_SUFFIX_SIZE;

    VALUE *alloc_obj = (VALUE *)rb_mmtk_alloc_fast_path(objspace, ractor_cache, alloc_size);
    if (!alloc_obj) {
        alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    }

    // Skip past the hidden header and record the payload size in it.
    alloc_obj++;
    alloc_obj[-1] = alloc_size - sizeof(VALUE) - RVALUE_SUFFIX_SIZE;
    alloc_obj[0] = flags;
    alloc_obj[1] = klass;

    // TODO: implement fast path for mmtk_post_alloc
    mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);

    // TODO: only add when object needs obj_free to be called
    mmtk_buffer_obj_free_candidate(ractor_cache, (VALUE)alloc_obj);

    objspace->total_allocated_objects++;

#if RACTOR_CHECK_MODE
    rb_ractor_setup_belonging((VALUE)alloc_obj);
#endif

    return (VALUE)alloc_obj;
}
939
940size_t
941rb_gc_impl_obj_slot_size(VALUE obj)
942{
943 return ((VALUE *)obj)[-1];
944}
945
946size_t
947rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
948{
949 for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
950 if (size == heap_sizes[i]) return i;
951 if (size < heap_sizes[i]) return i;
952 }
953
954 rb_bug("size too big");
955}
956
957bool
958rb_gc_impl_size_allocatable_p(size_t size)
959{
960 return size <= MMTK_MAX_OBJ_SIZE;
961}
962
963// Malloc
/* GC-tracked malloc. TODO: don't use system malloc. */
void *
rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    return malloc(size);
}
970
/* GC-tracked zeroed allocation. TODO: don't use system calloc. */
void *
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
    return calloc(1, size);
}
977
/* GC-tracked reallocation. TODO: don't use system realloc. */
void *
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
    return realloc(ptr, new_size);
}
984
/* GC-tracked free. TODO: don't use system free. */
void
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
{
    free(ptr);
}
991
/* No-op: this backend does not account externally-reported memory usage. */
void
rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)
{
}
993
994// Marking
/* Trace one object through MMTk's object closure, returning its (possibly
 * new) location; `pin` prevents the object from being moved. Must run on a
 * GC thread (reads rb_mmtk_gc_thread_tls). */
static inline VALUE
rb_mmtk_call_object_closure(VALUE obj, bool pin)
{
    // Marking a T_NONE slot means a dangling or bogus reference; report it
    // together with the object whose tracing produced it.
    if (RB_UNLIKELY(RB_BUILTIN_TYPE(obj) == T_NONE)) {
        const size_t info_size = 256;
        char obj_info_buf[info_size];
        rb_raw_obj_info(obj_info_buf, info_size, obj);

        char parent_obj_info_buf[info_size];
        rb_raw_obj_info(parent_obj_info_buf, info_size, marking_parent_object);

        rb_mmtk_gc_thread_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
    }

    return (VALUE)rb_mmtk_gc_thread_tls->object_closure.c_function(
        rb_mmtk_gc_thread_tls->object_closure.rust_closure,
        rb_mmtk_gc_thread_tls->gc_context,
        (MMTk_ObjectReference)obj,
        pin
    );
}
1016
1017void
1018rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
1019{
1020 if (RB_SPECIAL_CONST_P(obj)) return;
1021
1022 rb_mmtk_call_object_closure(obj, false);
1023}
1024
1025void
1026rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
1027{
1028 if (RB_SPECIAL_CONST_P(*ptr)) return;
1029
1030 VALUE new_obj = rb_mmtk_call_object_closure(*ptr, false);
1031 if (new_obj != *ptr) {
1032 *ptr = new_obj;
1033 }
1034}
1035
1036void
1037rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
1038{
1039 if (RB_SPECIAL_CONST_P(obj)) return;
1040
1041 rb_mmtk_call_object_closure(obj, true);
1042}
1043
1044void
1045rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
1046{
1047 if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (const void *)obj)) {
1048 rb_gc_impl_mark_and_pin(objspace_ptr, obj);
1049 }
1050}
1051
/* Register `obj` with MMTk as a holder of weak references so it gets the
 * weak-reference treatment during tracing. */
void
rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
{
    mmtk_declare_weak_references((MMTk_ObjectReference)obj);
}
1058
1059bool
1060rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
1061{
1062 return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
1063}
1064
1065// Compaction
1066void
1067rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
1068{
1069 mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
1070}
1071
1072bool
1073rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
1074{
1075 return rb_mmtk_call_object_closure(obj, false) != obj;
1076}
1077
1078VALUE
1079rb_gc_impl_location(void *objspace_ptr, VALUE obj)
1080{
1081 return rb_mmtk_call_object_closure(obj, false);
1082}
1083
1084// Write barriers
/* Reference write barrier: record that `a` now references `b`. MMTk only
 * needs the post-write notification on the writing object. */
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();

    // Writes of immediates need no barrier.
    if (SPECIAL_CONST_P(b)) return;

#ifdef MMTK_DEBUG
    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)a)) {
        char buff[256];
        rb_bug("a: %s is not an object", rb_raw_obj_info(buff, 256, a));
    }

    if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (void *)b)) {
        char buff[256];
        rb_bug("b: %s is not an object", rb_raw_obj_info(buff, 256, b));
    }
#endif

    MMTK_ASSERT(BUILTIN_TYPE(a) != T_NONE);
    MMTK_ASSERT(BUILTIN_TYPE(b) != T_NONE);

    mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
}
1109
1110void
1111rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
1112{
1113 mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
1114}
1115
1116void
1117rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
1118{
1119 struct MMTk_ractor_cache *cache = rb_gc_get_ractor_newobj_cache();
1120
1121 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
1122}
1123
1124// Heap walking
1125static void
1126each_objects_i(MMTk_ObjectReference obj, void *d)
1127{
1128 rb_darray(VALUE) *objs = d;
1129
1130 rb_darray_append(objs, (VALUE)obj);
1131}
1132
/* Call `func` on every live object, stopping early if it returns non-zero.
 * Objects are snapshotted into a darray first because the callback may free
 * objects, which would invalidate a live enumeration. */
static void
each_object(struct objspace *objspace, int (*func)(VALUE, void *), void *data)
{
    rb_darray(VALUE) objs;
    rb_darray_make(&objs, 0);

    mmtk_enumerate_objects(each_objects_i, &objs);

    VALUE *obj_ptr;
    rb_darray_foreach(objs, i, obj_ptr) {
        // Skip entries invalidated since the snapshot was taken.
        if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr)) continue;

        if (func(*obj_ptr, data) != 0) {
            break;
        }
    }

    rb_darray_free(objs);
}

/* Adapter state for rb_gc_impl_each_objects.
 * NOTE(review): the struct's opening line was missing from this copy of the
 * file; restored from its use in rb_gc_impl_each_objects_i below. */
struct rb_gc_impl_each_objects_data {
    int (*func)(void *, void *, size_t, void *);
    void *data;
};
1157
1158static int
1159rb_gc_impl_each_objects_i(VALUE obj, void *d)
1160{
1161 struct rb_gc_impl_each_objects_data *data = d;
1162
1163 size_t slot_size = rb_gc_impl_obj_slot_size(obj);
1164
1165 return data->func((void *)obj, (void *)(obj + slot_size), slot_size, data->data);
1166}
1167
1168void
1169rb_gc_impl_each_objects(void *objspace_ptr, int (*func)(void *, void *, size_t, void *), void *data)
1170{
1171 struct rb_gc_impl_each_objects_data each_objects_data = {
1172 .func = func,
1173 .data = data
1174 };
1175
1176 each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
1177}
1178
1180 void (*func)(VALUE, void *);
1181 void *data;
1182};
1183
1184static int
1185rb_gc_impl_each_object_i(VALUE obj, void *d)
1186{
1187 struct rb_gc_impl_each_object_data *data = d;
1188
1189 data->func(obj, data->data);
1190
1191 return 0;
1192}
1193
1194void
1195rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE, void *), void *data)
1196{
1197 struct rb_gc_impl_each_object_data each_object_data = {
1198 .func = func,
1199 .data = data
1200 };
1201
1202 each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
1203}
1204
1205// Finalizers
1206static VALUE
1207gc_run_finalizers_get_final(long i, void *data)
1208{
1209 VALUE table = (VALUE)data;
1210
1211 return RARRAY_AREF(table, i + 1);
1212}
1213
/* Drain objspace->finalizer_jobs, running each queued job exactly once.
 * Invoked via the postponed job registered in finalizer_postponed_job and
 * directly from shutdown. */
static void
gc_run_finalizers(void *data)
{
    struct objspace *objspace = data;

    /* Defer interrupt handling so a signal cannot abandon a finalizer
     * halfway through. */
    rb_gc_set_pending_interrupt();

    while (objspace->finalizer_jobs != NULL) {
        /* Pop the head of the singly-linked job list. */
        struct MMTk_final_job *job = objspace->finalizer_jobs;
        objspace->finalizer_jobs = job->next;

        switch (job->kind) {
          case MMTK_FINAL_JOB_DFREE:
            /* Run the object's registered dfree function. */
            job->as.dfree.func(job->as.dfree.data);
            break;
          case MMTK_FINAL_JOB_FINALIZE: {
            /* Run Ruby-level finalizer procs. Array layout is
             * [object_id, proc, proc, ...] (see rb_gc_impl_define_finalizer). */
            VALUE finalizer_array = job->as.finalize.finalizer_array;

            rb_gc_run_obj_finalizer(
                RARRAY_AREF(finalizer_array, 0),
                RARRAY_LEN(finalizer_array) - 1,
                gc_run_finalizers_get_final,
                (void *)finalizer_array
            );

            /* Keep the array alive for the duration of the calls above. */
            RB_GC_GUARD(finalizer_array);
            break;
          }
        }

        xfree(job);
    }

    rb_gc_unset_pending_interrupt();
}
1249
/* Queue obj's dfree function to run later as a finalizer job instead of
 * freeing inline. No-op when the object has no dfree. */
void
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
{
    if (dfree == NULL) return;

    struct objspace *objspace = objspace_ptr;

    struct MMTk_final_job *job = xmalloc(sizeof(struct MMTk_final_job));
    job->kind = MMTK_FINAL_JOB_DFREE;
    job->as.dfree.func = dfree;
    job->as.dfree.data = data;

    /* Lock-free push onto the head of the job list: retry the CAS until no
     * other thread has modified the head since we read it. */
    struct MMTk_final_job *prev;
    do {
        job->next = objspace->finalizer_jobs;
        prev = RUBY_ATOMIC_PTR_CAS(objspace->finalizer_jobs, job->next, job);
    } while (prev != job->next);

    /* NOTE(review): presumably skipped during free-at-exit because shutdown
     * drains the job list itself — confirm against shutdown path. */
    if (!ruby_free_at_exit_p()) {
        rb_postponed_job_trigger(objspace->finalizer_postponed_job);
    }
}
1272
/* Register block as a finalizer for obj (ObjectSpace.define_finalizer).
 * Returns the registered block, or the pre-existing equal one if the same
 * finalizer was already attached. */
VALUE
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    /* finalizer_table is shared VM-wide; guard all accesses with the VM lock. */
    int lev = RB_GC_VM_LOCK();

    if (st_lookup(objspace->finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    RB_GC_VM_UNLOCK(lev);
                    return recv;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        /* First finalizer for obj: table layout is [object_id, block, ...],
         * hidden so Ruby code cannot see or mutate it. */
        table = rb_ary_new3(2, rb_obj_id(obj), block);
        rb_obj_hide(table);
        st_add_direct(objspace->finalizer_table, obj, table);
    }

    RB_GC_VM_UNLOCK(lev);

    return block;
}
1313
1314void
1315rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
1316{
1317 struct objspace *objspace = objspace_ptr;
1318
1319 st_data_t data = obj;
1320
1321 int lev = RB_GC_VM_LOCK();
1322 st_delete(objspace->finalizer_table, &data, 0);
1323 RB_GC_VM_UNLOCK(lev);
1324
1325 FL_UNSET(obj, FL_FINALIZE);
1326}
1327
/* Copy obj's finalizers onto dest (used when an object is duplicated).
 * The finalizer array is duplicated and slot 0 rewritten to dest's id. */
void
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
{
    struct objspace *objspace = objspace_ptr;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    int lev = RB_GC_VM_LOCK();
    if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) {
        table = rb_ary_dup((VALUE)data);
        RARRAY_ASET(table, 0, rb_obj_id(dest));
        st_insert(objspace->finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        /* FL_FINALIZE without a table entry means GC state is corrupted. */
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
    }
    RB_GC_VM_UNLOCK(lev);
}
1349
1350static int
1351move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1352{
1353 struct objspace *objspace = (struct objspace *)arg;
1354
1355 make_final_job(objspace, (VALUE)key, (VALUE)val);
1356
1357 return ST_DELETE;
1358}
1359
/* VM shutdown: run every outstanding finalizer, then free all remaining
 * objects that registered for obj_free. */
void
rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

    /* Finalizers may define new finalizers, so loop until the table is empty. */
    while (objspace->finalizer_table->num_entries) {
        st_foreach(objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)objspace);

        gc_run_finalizers(objspace);
    }

    unsigned int lev = RB_GC_VM_LOCK();
    {
        /* Flush every ractor's local buffer so all obj_free candidates are
         * visible to mmtk_get_all_obj_free_candidates below. */
        struct MMTk_ractor_cache *rc;
        ccan_list_for_each(&objspace->ractor_caches, rc, list_node) {
            mmtk_flush_obj_free_buffer(rc);
        }

        /* Free each candidate the VM says should be freed at shutdown. */
        struct MMTk_RawVecOfObjRef registered_candidates = mmtk_get_all_obj_free_candidates();
        for (size_t i = 0; i < registered_candidates.len; i++) {
            VALUE obj = (VALUE)registered_candidates.ptr[i];

            if (rb_gc_shutdown_call_finalizer_p(obj)) {
                rb_gc_obj_free(objspace_ptr, obj);
                /* Clear flags so the slot reads as dead and is not freed twice. */
                RBASIC(obj)->flags = 0;
            }
        }
        mmtk_free_raw_vec_of_obj_ref(registered_candidates);
    }
    RB_GC_VM_UNLOCK(lev);

    /* Run any jobs queued by rb_gc_obj_free above. */
    gc_run_finalizers(objspace);
}
1393
1394// Forking
1395
/* Pre-fork hook: acquire the VM lock (stashed in fork_hook_vm_lock_lev for
 * rb_gc_impl_after_fork to release) and stop MMTk's worker threads so the
 * child process does not inherit running GC threads. */
void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    struct objspace *objspace = objspace_ptr;

  retry:
    objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
    rb_gc_vm_barrier();

    /* At this point, we know that all the Ractors are paused because of the
     * rb_gc_vm_barrier above. Since rb_mmtk_block_for_gc is a barrier point,
     * one or more Ractors could be paused there. However, mmtk_before_fork is
     * not compatible with that because it assumes that the MMTk workers are idle,
     * but the workers are not idle because they are busy working on a GC.
     *
     * This essentially implements a trylock. It will optimistically lock but will
     * release the lock if it detects that any other Ractors are waiting in
     * rb_mmtk_block_for_gc.
     */
    rb_atomic_t mutator_blocking_count = RUBY_ATOMIC_LOAD(objspace->mutator_blocking_count);
    if (mutator_blocking_count != 0) {
        /* A GC is in progress; back off and retry the whole sequence. */
        RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
        goto retry;
    }

    mmtk_before_fork();
}
1423
1424void
1425rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
1426{
1427 struct objspace *objspace = objspace_ptr;
1428
1429 mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
1430
1431 RB_GC_VM_UNLOCK(objspace->fork_hook_vm_lock_lev);
1432}
1433
1434// Statistics
1435
1436void
1437rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1438{
1439 struct objspace *objspace = objspace_ptr;
1440
1441 objspace->measure_gc_time = RTEST(flag);
1442}
1443
1444bool
1445rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1446{
1447 struct objspace *objspace = objspace_ptr;
1448
1449 return objspace->measure_gc_time;
1450}
1451
1452unsigned long long
1453rb_gc_impl_get_total_time(void *objspace_ptr)
1454{
1455 struct objspace *objspace = objspace_ptr;
1456
1457 return objspace->total_gc_time;
1458}
1459
1460size_t
1461rb_gc_impl_gc_count(void *objspace_ptr)
1462{
1463 struct objspace *objspace = objspace_ptr;
1464
1465 return objspace->gc_count;
1466}
1467
/* Implementation of GC.latest_gc_info. MMTk does not record per-GC detail,
 * so only a stub :state key is answered. Accepts either a Symbol query or
 * a Hash to fill; returns Qundef for an unknown Symbol. */
VALUE
rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE hash_or_key)
{
    VALUE hash = Qnil, key = Qnil;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_bug("gc_info_decode: non-hash or symbol given");
    }

/* SET returns immediately when the queried symbol matches, otherwise
 * stores the attribute into the result hash (when one was supplied). */
#define SET(name, attr) \
    if (key == ID2SYM(rb_intern_const(#name))) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));

    /* Hack to get StackProf working because it calls rb_gc_latest_gc_info with
     * the :state key and expects a result. This always returns the :none state. */
    SET(state, ID2SYM(rb_intern_const("none")));
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1501
/* Keys reported by rb_gc_impl_stat (GC.stat); gc_stat_sym_last is the
 * sentinel count, not a real key. */
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_moving_gc_count,
    gc_stat_sym_time,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_bytes,
    gc_stat_sym_used_bytes,
    gc_stat_sym_free_bytes,
    gc_stat_sym_starting_heap_address,
    gc_stat_sym_last_heap_address,
    gc_stat_sym_weak_references_count,
    gc_stat_sym_last
};

/* Interned Symbol for each gc_stat_sym; filled lazily by
 * setup_gc_stat_symbols(). */
static VALUE gc_stat_symbols[gc_stat_sym_last];
1517
/* Lazily intern the GC.stat key symbols. Idempotent: gc_stat_symbols[0]
 * doubles as the "already initialized" flag. */
static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(moving_gc_count);
        S(time);
        S(total_allocated_objects);
        S(total_bytes);
        S(used_bytes);
        S(free_bytes);
        S(starting_heap_address);
        S(last_heap_address);
        S(weak_references_count);
    }
}
1535
/* Implementation of GC.stat: answer a single Symbol query directly, or
 * fill the supplied Hash with every statistic. Returns Qundef for an
 * unrecognized Symbol. */
VALUE
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
{
    struct objspace *objspace = objspace_ptr;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_bug("non-hash or symbol given");
    }

/* SET returns immediately on a symbol match, otherwise stores into the
 * result hash (when one was supplied). */
#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->gc_count);
    SET(moving_gc_count, objspace->moving_gc_count);
    /* Divides by 1e6 — presumably ns -> ms; confirm total_gc_time's unit. */
    SET(time, objspace->total_gc_time / (1000 * 1000));
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_bytes, mmtk_total_bytes());
    SET(used_bytes, mmtk_used_bytes());
    SET(free_bytes, mmtk_free_bytes());
    SET(starting_heap_address, (size_t)mmtk_starting_heap_address());
    SET(last_heap_address, (size_t)mmtk_last_heap_address());
    SET(weak_references_count, mmtk_weak_references_count());
#undef SET

    if (!NIL_P(key)) {
        // Matched key should return above
        return Qundef;
    }

    return hash;
}
1579
/* Implementation of GC.stat_heap for MMTk. Only the :slot_size key is
 * supported per heap index; a Hash argument is returned unmodified (no
 * per-heap stats are populated). */
VALUE
rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
{
    if (FIXNUM_P(heap_name) && SYMBOL_P(hash_or_sym)) {
        int heap_idx = FIX2INT(heap_name);
        if (heap_idx < 0 || heap_idx >= MMTK_HEAP_COUNT) {
            rb_raise(rb_eArgError, "size pool index out of range");
        }

        if (hash_or_sym == ID2SYM(rb_intern("slot_size"))) {
            return SIZET2NUM(heap_sizes[heap_idx]);
        }

        return Qundef;
    }

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        return hash_or_sym;
    }

    return Qundef;
}
1602
1603// Miscellaneous
1604
1605#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
1606static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
1607
1609rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
1610{
1611 static ID ID_object_id;
1612
1613 if (!ID_object_id) {
1614#define I(s) ID_##s = rb_intern(#s);
1615 I(object_id);
1616#undef I
1617 }
1618
1619 size_t n = 0;
1620
1621#define SET_ENTRY(na, v) do { \
1622 MMTK_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1623 object_metadata_entries[n].name = ID_##na; \
1624 object_metadata_entries[n].val = v; \
1625 n++; \
1626} while (0)
1627
1628 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
1629
1630 object_metadata_entries[n].name = 0;
1631 object_metadata_entries[n].val = 0;
1632
1633 return object_metadata_entries;
1634}
1635
1636bool
1637rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
1638{
1639 if (ptr == NULL) return false;
1640 if ((uintptr_t)ptr % sizeof(void*) != 0) return false;
1641 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1642}
1643
1644bool
1645rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj)
1646{
1647 return false;
1648}
1649
1650void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event) { }
1651
1652void
1653rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
1654{
1655 if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
1656 rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
1657 }
1658
1659 rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
1660}
1661
1662// GC Identification
1663
/* Identify this GC implementation by name. */
const char *
rb_gc_impl_active_gc_name(void)
{
    static const char gc_name[] = "mmtk";

    return gc_name;
}
Atomic operations.
#define RUBY_ATOMIC_INC(var)
Atomically increments the value pointed by var.
Definition atomic.h:214
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
Definition atomic.h:365
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_DEC(var)
Atomically decrements the value pointed by var.
Definition atomic.h:223
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
Definition atomic.h:175
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
Definition debug.h:703
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
Definition vm_trace.c:1916
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
Definition vm_trace.c:1882
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:95
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:430
static void RB_FL_SET(VALUE obj, VALUE flags)
Sets the given flag(s).
Definition fl_type.h:561
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
Definition fl_type.h:226
@ RUBY_FL_WEAK_REFERENCE
This object weakly refers to other objects.
Definition fl_type.h:260
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:131
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:125
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:129
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:95
VALUE rb_mGC
GC module.
Definition gc.c:410
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:141
#define RBIMPL_ATTR_FORMAT(x, y, z)
Wraps (or simulates) __attribute__((format))
Definition format.h:33
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1515
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:859
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:285
int len
Length of the buffer.
Definition io.h:8
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
Definition long.h:59
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5815
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
C99 shim for <stdbool.h>
void * rust_closure
The pointer to the Rust-level closure object.
Definition mmtk.h:50
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Definition mmtk.h:46
Ruby object's base components.
Definition rbasic.h:69
Definition gc_impl.h:15
Private header for the default GC and other GC implementations first introduced for [Feature #20470].
Definition gc.h:16
Definition st.h:79
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:182
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376