10#include "gc/mmtk/mmtk.h"
12#include "ccan/list/list.h"
16#include <sys/sysctl.h>
20# define VM_CHECK_MODE RUBY_DEBUG
24#ifndef RACTOR_CHECK_MODE
25# define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)
29# define RVALUE_SUFFIX_SIZE sizeof(VALUE)
30void rb_ractor_setup_belonging(
VALUE obj);
32# define RVALUE_SUFFIX_SIZE 0
40 size_t moving_gc_count;
42 size_t total_allocated_objects;
48 struct ccan_list_head ractor_caches;
49 unsigned long live_ractor_cache_count;
51 pthread_mutex_t mutex;
54 pthread_cond_t cond_world_stopped;
55 pthread_cond_t cond_world_started;
56 size_t start_the_world_count;
58 pthread_mutex_t event_hook_mutex;
61 bool gc_thread_crashed;
67 unsigned int fork_hook_vm_lock_lev;
70#define OBJ_FREE_BUF_CAPACITY 128
73 struct ccan_list_node list_node;
75 MMTk_Mutator *mutator;
80 MMTk_ObjectReference obj_free_parallel_buf[OBJ_FREE_BUF_CAPACITY];
81 size_t obj_free_parallel_count;
82 MMTk_ObjectReference obj_free_non_parallel_buf[OBJ_FREE_BUF_CAPACITY];
83 size_t obj_free_non_parallel_count;
90 MMTK_FINAL_JOB_FINALIZE,
99 VALUE finalizer_array;
104#ifdef RB_THREAD_LOCAL_SPECIFIER
107RB_THREAD_LOCAL_SPECIFIER
VALUE marking_parent_object;
109# error We currently need language-supported TLS
113# define MMTK_ASSERT(expr, ...) RUBY_ASSERT_ALWAYS(expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
115# define MMTK_ASSERT(expr, ...) ((void)0)
120static inline VALUE rb_mmtk_call_object_closure(
VALUE obj,
bool pin);
125 rb_mmtk_gc_thread_tls = gc_thread_tls;
129rb_mmtk_is_mutator(
void)
135rb_mmtk_stop_the_world(
void)
140 if ((err = pthread_mutex_lock(&
objspace->mutex)) != 0) {
141 rb_bug(
"ERROR: cannot lock objspace->mutex: %s", strerror(err));
148 if ((err = pthread_mutex_unlock(&
objspace->mutex)) != 0) {
149 rb_bug(
"ERROR: cannot release objspace->mutex: %s", strerror(err));
154rb_mmtk_resume_mutators(
bool current_gc_may_move)
159 if ((err = pthread_mutex_lock(&
objspace->mutex)) != 0) {
160 rb_bug(
"ERROR: cannot lock objspace->mutex: %s", strerror(err));
165 if (current_gc_may_move)
objspace->moving_gc_count++;
166 pthread_cond_broadcast(&
objspace->cond_world_started);
168 if ((err = pthread_mutex_unlock(&
objspace->mutex)) != 0) {
169 rb_bug(
"ERROR: cannot release objspace->mutex: %s", strerror(err));
180 size_t starting_gc_count =
objspace->gc_count;
182 int lock_lev = RB_GC_VM_LOCK();
185 if ((err = pthread_mutex_lock(&
objspace->mutex)) != 0) {
186 rb_bug(
"ERROR: cannot lock objspace->mutex: %s", strerror(err));
189 if (
objspace->gc_count == starting_gc_count) {
192 rb_gc_initialize_vm_context(&
objspace->vm_context);
194 mutator->gc_mutator_p =
true;
198 clock_gettime(CLOCK_MONOTONIC, &gc_start_time);
201 rb_gc_save_machine_context();
206 ccan_list_for_each(&
objspace->ractor_caches, rc, list_node) {
207 mmtk_flush_obj_free_buffer(rc);
212 pthread_cond_broadcast(&
objspace->cond_world_stopped);
219 if (RB_UNLIKELY(
objspace->crash_context.gc_thread_crashed)) {
220 rb_bug(
"%s",
objspace->crash_context.crash_msg);
225 clock_gettime(CLOCK_MONOTONIC, &gc_end_time);
228 (gc_end_time.tv_sec - gc_start_time.tv_sec) * (1000 * 1000 * 1000) +
229 (gc_end_time.tv_nsec - gc_start_time.tv_nsec);
233 if ((err = pthread_mutex_unlock(&
objspace->mutex)) != 0) {
234 rb_bug(
"ERROR: cannot release objspace->mutex: %s", strerror(err));
236 RB_GC_VM_UNLOCK(lock_lev);
240rb_mmtk_before_updating_jit_code(
void)
242 rb_gc_before_updating_jit_code();
246rb_mmtk_after_updating_jit_code(
void)
248 rb_gc_after_updating_jit_code();
252rb_mmtk_number_of_mutators(
void)
255 return objspace->live_ractor_cache_count;
259rb_mmtk_get_mutators(
void (*visit_mutator)(MMTk_Mutator *mutator,
void *data),
void *data)
264 ccan_list_for_each(&
objspace->ractor_caches, ractor_cache, list_node) {
265 visit_mutator(ractor_cache->mutator, data);
270rb_mmtk_scan_gc_roots(
void)
278pin_value(st_data_t key, st_data_t value, st_data_t data)
280 rb_gc_impl_mark_and_pin((
void *)data, (
VALUE)value);
286rb_mmtk_scan_objspace(
void)
290 if (
objspace->finalizer_table != NULL) {
295 while (job != NULL) {
297 case MMTK_FINAL_JOB_DFREE:
299 case MMTK_FINAL_JOB_FINALIZE:
300 rb_gc_impl_mark(
objspace, job->as.finalize.finalizer_array);
303 rb_bug(
"rb_mmtk_scan_objspace: unknown final job type %d", job->kind);
311rb_mmtk_move_obj_during_marking(MMTk_ObjectReference from, MMTk_ObjectReference to)
313 rb_gc_move_obj_during_marking((
VALUE)from, (
VALUE)to);
317rb_mmtk_update_object_references(MMTk_ObjectReference mmtk_object)
322 marking_parent_object = object;
323 rb_gc_update_object_references(rb_gc_get_objspace(),
object);
324 marking_parent_object = 0;
329rb_mmtk_call_gc_mark_children(MMTk_ObjectReference
object)
331 marking_parent_object = (
VALUE)
object;
332 rb_gc_mark_children(rb_gc_get_objspace(), (
VALUE)
object);
333 marking_parent_object = 0;
337rb_mmtk_handle_weak_references(MMTk_ObjectReference mmtk_object,
bool moving)
341 marking_parent_object = object;
343 rb_gc_handle_weak_references(
object);
346 rb_gc_update_object_references(rb_gc_get_objspace(),
object);
349 marking_parent_object = 0;
353rb_mmtk_call_obj_free(MMTk_ObjectReference
object)
359 pthread_mutex_lock(&
objspace->event_hook_mutex);
361 pthread_mutex_unlock(&
objspace->event_hook_mutex);
367 memset((
void *)obj, 0, rb_gc_impl_obj_slot_size(obj));
372rb_mmtk_vm_live_bytes(
void)
383 job->next =
objspace->finalizer_jobs;
384 job->kind = MMTK_FINAL_JOB_FINALIZE;
385 job->as.finalize.finalizer_array = table;
391rb_mmtk_update_finalizer_table_i(st_data_t key, st_data_t value, st_data_t data,
int error)
393 MMTK_ASSERT(mmtk_is_reachable((MMTk_ObjectReference)value));
398 if (mmtk_is_reachable((MMTk_ObjectReference)key)) {
399 VALUE new_key_location = rb_mmtk_call_object_closure((
VALUE)key,
false);
403 if (new_key_location != key) {
419rb_mmtk_update_finalizer_table_replace_i(st_data_t *key, st_data_t *value, st_data_t data,
int existing)
421 *key = rb_mmtk_call_object_closure((
VALUE)*key,
false);
427rb_mmtk_update_finalizer_table(
void)
431 st_foreach_with_replace(
433 rb_mmtk_update_finalizer_table_i,
434 rb_mmtk_update_finalizer_table_replace_i,
440rb_mmtk_global_tables_count(
void)
442 return RB_GC_VM_WEAK_TABLE_COUNT;
445static inline VALUE rb_mmtk_call_object_closure(
VALUE obj,
bool pin);
448rb_mmtk_update_global_tables_i(
VALUE val,
void *data)
450 if (!mmtk_is_reachable((MMTk_ObjectReference)val)) {
455 if (rb_mmtk_call_object_closure(val,
false) != val) {
463rb_mmtk_update_global_tables_replace_i(
VALUE *ptr,
void *data)
466 *ptr = rb_mmtk_call_object_closure(*ptr,
false);
472rb_mmtk_update_global_tables(
int table,
bool moving)
474 MMTK_ASSERT(table < RB_GC_VM_WEAK_TABLE_COUNT);
476 rb_gc_vm_weak_table_foreach(
477 rb_mmtk_update_global_tables_i,
478 rb_mmtk_update_global_tables_replace_i,
481 (
enum rb_gc_vm_weak_tables)table
486rb_mmtk_special_const_p(MMTk_ObjectReference
object)
495rb_mmtk_gc_thread_bug(const
char *msg, ...)
499 objspace->crash_context.gc_thread_crashed =
true;
503 vsnprintf(
objspace->crash_context.crash_msg,
sizeof(
objspace->crash_context.crash_msg), msg, args);
506 fprintf(stderr,
"-- GC thread backtrace "
507 "-------------------------------------------\n");
508 rb_gc_print_backtrace();
509 fprintf(stderr,
"\n");
511 rb_mmtk_resume_mutators(
false);
515 rb_bug(
"rb_mmtk_gc_thread_bug");
519rb_mmtk_gc_thread_panic_handler(
void)
521 rb_mmtk_gc_thread_bug(
"MMTk GC thread panicked");
525rb_mmtk_mutator_thread_panic_handler(
void)
527 rb_bug(
"Ruby mutator thread panicked");
532 rb_mmtk_init_gc_worker_thread,
534 rb_mmtk_stop_the_world,
535 rb_mmtk_resume_mutators,
536 rb_mmtk_block_for_gc,
537 rb_mmtk_before_updating_jit_code,
538 rb_mmtk_after_updating_jit_code,
539 rb_mmtk_number_of_mutators,
540 rb_mmtk_get_mutators,
541 rb_mmtk_scan_gc_roots,
542 rb_mmtk_scan_objspace,
543 rb_mmtk_move_obj_during_marking,
544 rb_mmtk_update_object_references,
545 rb_mmtk_call_gc_mark_children,
546 rb_mmtk_handle_weak_references,
547 rb_mmtk_call_obj_free,
548 rb_mmtk_vm_live_bytes,
549 rb_mmtk_update_global_tables,
550 rb_mmtk_global_tables_count,
551 rb_mmtk_update_finalizer_table,
552 rb_mmtk_special_const_p,
553 rb_mmtk_mutator_thread_panic_handler,
554 rb_mmtk_gc_thread_panic_handler,
558#define RB_MMTK_HEAP_LIMIT_PERC 80
559#define RB_MMTK_DEFAULT_HEAP_MIN (1024 * 1024)
560#define RB_MMTK_DEFAULT_HEAP_MAX (rb_mmtk_system_physical_memory() / 100 * RB_MMTK_HEAP_LIMIT_PERC)
563 RB_MMTK_DYNAMIC_HEAP,
568rb_mmtk_builder_init(
void)
570 MMTk_Builder *builder = mmtk_builder_default();
575rb_gc_impl_objspace_alloc(
void)
577 MMTk_Builder *builder = rb_mmtk_builder_init();
579 .ractor_check_mode = RACTOR_CHECK_MODE != 0,
580 .suffix_size = RVALUE_SUFFIX_SIZE,
582 mmtk_init_binding(builder, &binding_options, &ruby_upcalls);
584 return calloc(1,
sizeof(
struct objspace));
587static void gc_run_finalizers(
void *data);
590rb_gc_impl_objspace_init(
void *objspace_ptr)
596 objspace->finalizer_table = st_init_numtable();
599 ccan_list_head_init(&
objspace->ractor_caches);
601 objspace->mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
602 objspace->cond_world_stopped = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
603 objspace->cond_world_started = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
605 objspace->event_hook_mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
609rb_gc_impl_objspace_free(
void *objspace_ptr)
615rb_gc_impl_ractor_cache_alloc(
void *objspace_ptr,
void *ractor)
618 if (
objspace->live_ractor_cache_count == 0) {
619 mmtk_initialize_collection(ractor);
621 objspace->live_ractor_cache_count++;
624 ccan_list_add(&
objspace->ractor_caches, &cache->list_node);
626 cache->mutator = mmtk_bind_mutator(cache);
627 cache->bump_pointer = mmtk_get_bump_pointer_allocator(cache->mutator);
633rb_gc_impl_ractor_cache_free(
void *objspace_ptr,
void *cache_ptr)
638 ccan_list_del(&cache->list_node);
640 mmtk_flush_obj_free_buffer(cache);
642 if (ruby_free_at_exit_p()) {
643 MMTK_ASSERT(
objspace->live_ractor_cache_count > 0);
646 MMTK_ASSERT(
objspace->live_ractor_cache_count > 1);
649 objspace->live_ractor_cache_count--;
651 mmtk_destroy_mutator(cache->mutator);
/* Intentionally a no-op for the MMTk backend: there are no Ruby-side
 * GC tuning parameters to apply here. */
void
rb_gc_impl_set_params(void *objspace_ptr)
{
    (void)objspace_ptr;
}
656static VALUE gc_verify_internal_consistency(
VALUE self) {
return Qnil; }
659#define MMTK_HEAP_COUNT 12
660#define MMTK_MAX_OBJ_SIZE 1024
661static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
662 32, 40, 64, 80, 96, 128, 160, 256, 512, 640, 768, MMTK_MAX_OBJ_SIZE, 0
665#define MMTK_HEAP_COUNT 5
666#define MMTK_MAX_OBJ_SIZE 512
667static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
668 32, 64, 128, 256, MMTK_MAX_OBJ_SIZE, 0
675 VALUE gc_constants = rb_hash_new();
678 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RVALUE_OVERHEAD")),
INT2NUM(0));
679 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RVARGC_MAX_ALLOCATE_SIZE")),
LONG2FIX(MMTK_MAX_OBJ_SIZE));
680 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"HEAP_COUNT")),
LONG2FIX(MMTK_HEAP_COUNT));
682 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RVALUE_OLD_AGE")),
INT2FIX(0));
684 rb_define_const(
rb_mGC,
"INTERNAL_CONSTANTS", gc_constants);
697rb_gc_impl_heap_sizes(
void *objspace_ptr)
703rb_mmtk_obj_free_iter_wrapper(
VALUE obj,
void *data)
708 rb_gc_obj_free_vm_weak_references(obj);
719rb_gc_impl_shutdown_free_objects(
void *objspace_ptr)
721 mmtk_set_gc_enabled(
false);
722 each_object(objspace_ptr, rb_mmtk_obj_free_iter_wrapper, objspace_ptr);
723 mmtk_set_gc_enabled(
true);
728rb_gc_impl_start(
void *objspace_ptr,
bool full_mark,
bool immediate_mark,
bool immediate_sweep,
bool compact)
730 mmtk_handle_user_collection_request(rb_gc_get_ractor_newobj_cache(),
true, full_mark);
734rb_gc_impl_during_gc_p(
void *objspace_ptr)
741rb_gc_impl_prepare_heap_i(MMTk_ObjectReference obj,
void *d)
743 rb_gc_prepare_heap_process_object((
VALUE)obj);
747rb_gc_impl_prepare_heap(
void *objspace_ptr)
749 mmtk_enumerate_objects(rb_gc_impl_prepare_heap_i, NULL);
753rb_gc_impl_gc_enable(
void *objspace_ptr)
755 mmtk_set_gc_enabled(
true);
759rb_gc_impl_gc_disable(
void *objspace_ptr,
bool finish_current_gc)
761 mmtk_set_gc_enabled(
false);
765rb_gc_impl_gc_enabled_p(
void *objspace_ptr)
767 return mmtk_gc_enabled_p();
771rb_gc_impl_stress_set(
void *objspace_ptr,
VALUE flag)
779rb_gc_impl_stress_get(
void *objspace_ptr)
787rb_gc_impl_config_get(
void *objspace_ptr)
789 VALUE hash = rb_hash_new();
794 size_t heap_min = mmtk_heap_min();
802rb_gc_impl_config_set(
void *objspace_ptr,
VALUE hash)
808rb_gc_impl_get_vm_context(
void *objspace_ptr)
821 if (bump_pointer == NULL)
return 0;
823 uintptr_t new_cursor = bump_pointer->cursor + size;
825 if (new_cursor > bump_pointer->limit) {
830 bump_pointer->cursor = new_cursor;
836obj_can_parallel_free_p(
VALUE obj)
859 if (cache->obj_free_parallel_count > 0) {
860 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
861 cache->obj_free_parallel_count,
true);
862 cache->obj_free_parallel_count = 0;
864 if (cache->obj_free_non_parallel_count > 0) {
865 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
866 cache->obj_free_non_parallel_count,
false);
867 cache->obj_free_non_parallel_count = 0;
874 if (obj_can_parallel_free_p(obj)) {
875 cache->obj_free_parallel_buf[cache->obj_free_parallel_count++] = (MMTk_ObjectReference)obj;
876 if (cache->obj_free_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
877 mmtk_add_obj_free_candidates(cache->obj_free_parallel_buf,
878 cache->obj_free_parallel_count,
true);
879 cache->obj_free_parallel_count = 0;
883 cache->obj_free_non_parallel_buf[cache->obj_free_non_parallel_count++] = (MMTk_ObjectReference)obj;
884 if (cache->obj_free_non_parallel_count >= OBJ_FREE_BUF_CAPACITY) {
885 mmtk_add_obj_free_candidates(cache->obj_free_non_parallel_buf,
886 cache->obj_free_non_parallel_count,
false);
887 cache->obj_free_non_parallel_count = 0;
893rb_gc_impl_new_obj(
void *objspace_ptr,
void *cache_ptr,
VALUE klass,
VALUE flags,
bool wb_protected,
size_t alloc_size)
895#define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
899 if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug(
"too big");
900 for (
int i = 0; i < MMTK_HEAP_COUNT; i++) {
901 if (alloc_size == heap_sizes[i])
break;
902 if (alloc_size < heap_sizes[i]) {
903 alloc_size = heap_sizes[i];
909 mmtk_handle_user_collection_request(ractor_cache,
false,
false);
913 alloc_size +=
sizeof(
VALUE) + RVALUE_SUFFIX_SIZE;
915 VALUE *alloc_obj = (
VALUE *)rb_mmtk_alloc_fast_path(
objspace, ractor_cache, alloc_size);
917 alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
921 alloc_obj[-1] = alloc_size -
sizeof(
VALUE) - RVALUE_SUFFIX_SIZE;
922 alloc_obj[0] = flags;
923 alloc_obj[1] = klass;
926 mmtk_post_alloc(ractor_cache->mutator, (
void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
929 mmtk_buffer_obj_free_candidate(ractor_cache, (
VALUE)alloc_obj);
931 objspace->total_allocated_objects++;
934 rb_ractor_setup_belonging((
VALUE)alloc_obj);
937 return (
VALUE)alloc_obj;
941rb_gc_impl_obj_slot_size(
VALUE obj)
943 return ((
VALUE *)obj)[-1];
947rb_gc_impl_heap_id_for_size(
void *objspace_ptr,
size_t size)
949 for (
int i = 0; i < MMTK_HEAP_COUNT; i++) {
950 if (size == heap_sizes[i])
return i;
951 if (size < heap_sizes[i])
return i;
954 rb_bug(
"size too big");
958rb_gc_impl_size_allocatable_p(
size_t size)
960 return size <= MMTK_MAX_OBJ_SIZE;
965rb_gc_impl_malloc(
void *objspace_ptr,
size_t size,
bool gc_allowed)
972rb_gc_impl_calloc(
void *objspace_ptr,
size_t size,
bool gc_allowed)
975 return calloc(1, size);
979rb_gc_impl_realloc(
void *objspace_ptr,
void *ptr,
size_t new_size,
size_t old_size,
bool gc_allowed)
982 return realloc(ptr, new_size);
986rb_gc_impl_free(
void *objspace_ptr,
void *ptr,
size_t old_size)
/* No-op: this backend does not track externally reported memory-usage
 * deltas, so the adjustment hint is deliberately ignored. */
void
rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)
{
    (void)objspace_ptr;
    (void)diff;
}
996rb_mmtk_call_object_closure(
VALUE obj,
bool pin)
999 const size_t info_size = 256;
1000 char obj_info_buf[info_size];
1001 rb_raw_obj_info(obj_info_buf, info_size, obj);
1003 char parent_obj_info_buf[info_size];
1004 rb_raw_obj_info(parent_obj_info_buf, info_size, marking_parent_object);
1006 rb_mmtk_gc_thread_bug(
"try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
1011 rb_mmtk_gc_thread_tls->gc_context,
1012 (MMTk_ObjectReference)obj,
1018rb_gc_impl_mark(
void *objspace_ptr,
VALUE obj)
1022 rb_mmtk_call_object_closure(obj,
false);
1026rb_gc_impl_mark_and_move(
void *objspace_ptr,
VALUE *ptr)
1030 VALUE new_obj = rb_mmtk_call_object_closure(*ptr,
false);
1031 if (new_obj != *ptr) {
1037rb_gc_impl_mark_and_pin(
void *objspace_ptr,
VALUE obj)
1041 rb_mmtk_call_object_closure(obj,
true);
1045rb_gc_impl_mark_maybe(
void *objspace_ptr,
VALUE obj)
1047 if (rb_gc_impl_pointer_to_heap_p(objspace_ptr, (
const void *)obj)) {
1048 rb_gc_impl_mark_and_pin(objspace_ptr, obj);
1053rb_gc_impl_declare_weak_references(
void *objspace_ptr,
VALUE obj)
1056 mmtk_declare_weak_references((MMTk_ObjectReference)obj);
1060rb_gc_impl_handle_weak_references_alive_p(
void *objspace_ptr,
VALUE obj)
1062 return mmtk_weak_references_alive_p((MMTk_ObjectReference)obj);
1067rb_gc_impl_register_pinning_obj(
void *objspace_ptr,
VALUE obj)
1069 mmtk_register_pinning_obj((MMTk_ObjectReference)obj);
1073rb_gc_impl_object_moved_p(
void *objspace_ptr,
VALUE obj)
1075 return rb_mmtk_call_object_closure(obj,
false) != obj;
1079rb_gc_impl_location(
void *objspace_ptr,
VALUE obj)
1081 return rb_mmtk_call_object_closure(obj,
false);
1086rb_gc_impl_writebarrier(
void *objspace_ptr,
VALUE a,
VALUE b)
1093 if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (
void *)a)) {
1095 rb_bug(
"a: %s is not an object", rb_raw_obj_info(buff, 256, a));
1098 if (!rb_gc_impl_pointer_to_heap_p(objspace_ptr, (
void *)b)) {
1100 rb_bug(
"b: %s is not an object", rb_raw_obj_info(buff, 256, b));
1107 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)a);
1111rb_gc_impl_writebarrier_unprotect(
void *objspace_ptr,
VALUE obj)
1113 mmtk_register_wb_unprotected_object((MMTk_ObjectReference)obj);
1117rb_gc_impl_writebarrier_remember(
void *objspace_ptr,
VALUE obj)
1121 mmtk_object_reference_write_post(cache->mutator, (MMTk_ObjectReference)obj);
1126each_objects_i(MMTk_ObjectReference obj,
void *d)
1128 rb_darray(
VALUE) *objs = d;
1130 rb_darray_append(objs, (
VALUE)obj);
1136 rb_darray(
VALUE) objs;
1137 rb_darray_make(&objs, 0);
1139 mmtk_enumerate_objects(each_objects_i, &objs);
1142 rb_darray_foreach(objs, i, obj_ptr) {
1143 if (!mmtk_is_mmtk_object((MMTk_ObjectReference)*obj_ptr))
continue;
1145 if (func(*obj_ptr, data) != 0) {
1150 rb_darray_free(objs);
1154 int (*func)(
void *,
void *, size_t,
void *);
1159rb_gc_impl_each_objects_i(
VALUE obj,
void *d)
1163 size_t slot_size = rb_gc_impl_obj_slot_size(obj);
1165 return data->func((
void *)obj, (
void *)(obj + slot_size), slot_size, data->data);
1169rb_gc_impl_each_objects(
void *objspace_ptr,
int (*func)(
void *,
void *,
size_t,
void *),
void *data)
1176 each_object(objspace_ptr, rb_gc_impl_each_objects_i, &each_objects_data);
1180 void (*func)(
VALUE,
void *);
1185rb_gc_impl_each_object_i(
VALUE obj,
void *d)
1189 data->func(obj, data->data);
1195rb_gc_impl_each_object(
void *objspace_ptr,
void (*func)(
VALUE,
void *),
void *data)
1202 each_object(objspace_ptr, rb_gc_impl_each_object_i, &each_object_data);
1207gc_run_finalizers_get_final(
long i,
void *data)
1215gc_run_finalizers(
void *data)
1219 rb_gc_set_pending_interrupt();
1221 while (
objspace->finalizer_jobs != NULL) {
1223 objspace->finalizer_jobs = job->next;
1225 switch (job->kind) {
1226 case MMTK_FINAL_JOB_DFREE:
1227 job->as.dfree.func(job->as.dfree.data);
1229 case MMTK_FINAL_JOB_FINALIZE: {
1230 VALUE finalizer_array = job->as.finalize.finalizer_array;
1232 rb_gc_run_obj_finalizer(
1235 gc_run_finalizers_get_final,
1236 (
void *)finalizer_array
1247 rb_gc_unset_pending_interrupt();
1251rb_gc_impl_make_zombie(
void *objspace_ptr,
VALUE obj,
void (*dfree)(
void *),
void *data)
1253 if (dfree == NULL)
return;
1258 job->kind = MMTK_FINAL_JOB_DFREE;
1259 job->as.dfree.func = dfree;
1260 job->as.dfree.data = data;
1264 job->next =
objspace->finalizer_jobs;
1266 }
while (prev != job->next);
1268 if (!ruby_free_at_exit_p()) {
1274rb_gc_impl_define_finalizer(
void *objspace_ptr,
VALUE obj,
VALUE block)
1282 int lev = RB_GC_VM_LOCK();
1284 if (st_lookup(
objspace->finalizer_table, obj, &data)) {
1285 table = (
VALUE)data;
1292 for (i = 0; i <
len; i++) {
1295 RB_GC_VM_UNLOCK(lev);
1306 st_add_direct(
objspace->finalizer_table, obj, table);
1309 RB_GC_VM_UNLOCK(lev);
1315rb_gc_impl_undefine_finalizer(
void *objspace_ptr,
VALUE obj)
1319 st_data_t data = obj;
1321 int lev = RB_GC_VM_LOCK();
1322 st_delete(
objspace->finalizer_table, &data, 0);
1323 RB_GC_VM_UNLOCK(lev);
1329rb_gc_impl_copy_finalizer(
void *objspace_ptr,
VALUE dest,
VALUE obj)
1337 int lev = RB_GC_VM_LOCK();
1338 if (RB_LIKELY(st_lookup(
objspace->finalizer_table, obj, &data))) {
1341 st_insert(
objspace->finalizer_table, dest, table);
1345 rb_bug(
"rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
1347 RB_GC_VM_UNLOCK(lev);
1351move_finalizer_from_table_i(st_data_t key, st_data_t val, st_data_t arg)
1361rb_gc_impl_shutdown_call_finalizer(
void *objspace_ptr)
1365 while (
objspace->finalizer_table->num_entries) {
1366 st_foreach(
objspace->finalizer_table, move_finalizer_from_table_i, (st_data_t)
objspace);
1371 unsigned int lev = RB_GC_VM_LOCK();
1374 ccan_list_for_each(&
objspace->ractor_caches, rc, list_node) {
1375 mmtk_flush_obj_free_buffer(rc);
1379 for (
size_t i = 0; i < registered_candidates.len; i++) {
1380 VALUE obj = (
VALUE)registered_candidates.ptr[i];
1382 if (rb_gc_shutdown_call_finalizer_p(obj)) {
1383 rb_gc_obj_free(objspace_ptr, obj);
1387 mmtk_free_raw_vec_of_obj_ref(registered_candidates);
1389 RB_GC_VM_UNLOCK(lev);
1397rb_gc_impl_before_fork(
void *objspace_ptr)
1402 objspace->fork_hook_vm_lock_lev = RB_GC_VM_LOCK();
1416 if (mutator_blocking_count != 0) {
1417 RB_GC_VM_UNLOCK(
objspace->fork_hook_vm_lock_lev);
1425rb_gc_impl_after_fork(
void *objspace_ptr, rb_pid_t pid)
1429 mmtk_after_fork(rb_gc_get_ractor_newobj_cache());
1431 RB_GC_VM_UNLOCK(
objspace->fork_hook_vm_lock_lev);
1437rb_gc_impl_set_measure_total_time(
void *objspace_ptr,
VALUE flag)
1445rb_gc_impl_get_measure_total_time(
void *objspace_ptr)
1453rb_gc_impl_get_total_time(
void *objspace_ptr)
1461rb_gc_impl_gc_count(
void *objspace_ptr)
1469rb_gc_impl_latest_gc_info(
void *objspace_ptr,
VALUE hash_or_key)
1480 rb_bug(
"gc_info_decode: non-hash or symbol given");
1483#define SET(name, attr) \
1484 if (key == ID2SYM(rb_intern_const(#name))) \
1486 else if (hash != Qnil) \
1487 rb_hash_aset(hash, ID2SYM(rb_intern_const(#name)), (attr));
1504 gc_stat_sym_moving_gc_count,
1506 gc_stat_sym_total_allocated_objects,
1507 gc_stat_sym_total_bytes,
1508 gc_stat_sym_used_bytes,
1509 gc_stat_sym_free_bytes,
1510 gc_stat_sym_starting_heap_address,
1511 gc_stat_sym_last_heap_address,
1512 gc_stat_sym_weak_references_count,
1516static VALUE gc_stat_symbols[gc_stat_sym_last];
1519setup_gc_stat_symbols(
void)
1521 if (gc_stat_symbols[0] == 0) {
1522#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
1526 S(total_allocated_objects);
1530 S(starting_heap_address);
1531 S(last_heap_address);
1532 S(weak_references_count);
1537rb_gc_impl_stat(
void *objspace_ptr,
VALUE hash_or_sym)
1542 setup_gc_stat_symbols();
1551 rb_bug(
"non-hash or symbol given");
1554#define SET(name, attr) \
1555 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
1556 return SIZET2NUM(attr); \
1557 else if (hash != Qnil) \
1558 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
1561 SET(moving_gc_count,
objspace->moving_gc_count);
1562 SET(time,
objspace->total_gc_time / (1000 * 1000));
1563 SET(total_allocated_objects,
objspace->total_allocated_objects);
1564 SET(total_bytes, mmtk_total_bytes());
1565 SET(used_bytes, mmtk_used_bytes());
1566 SET(free_bytes, mmtk_free_bytes());
1567 SET(starting_heap_address, (
size_t)mmtk_starting_heap_address());
1568 SET(last_heap_address, (
size_t)mmtk_last_heap_address());
1569 SET(weak_references_count, mmtk_weak_references_count());
1581rb_gc_impl_stat_heap(
void *objspace_ptr,
VALUE heap_name,
VALUE hash_or_sym)
1584 int heap_idx =
FIX2INT(heap_name);
1585 if (heap_idx < 0 || heap_idx >= MMTK_HEAP_COUNT) {
1586 rb_raise(rb_eArgError,
"size pool index out of range");
1589 if (hash_or_sym ==
ID2SYM(rb_intern(
"slot_size"))) {
1605#define RB_GC_OBJECT_METADATA_ENTRY_COUNT 1
1609rb_gc_impl_object_metadata(
void *objspace_ptr,
VALUE obj)
1611 static ID ID_object_id;
1613 if (!ID_object_id) {
1614#define I(s) ID_##s = rb_intern(#s);
1621#define SET_ENTRY(na, v) do { \
1622 MMTK_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
1623 object_metadata_entries[n].name = ID_##na; \
1624 object_metadata_entries[n].val = v; \
1628 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
1630 object_metadata_entries[n].name = 0;
1631 object_metadata_entries[n].val = 0;
1633 return object_metadata_entries;
1637rb_gc_impl_pointer_to_heap_p(
void *objspace_ptr,
const void *ptr)
1639 if (ptr == NULL)
return false;
1640 if ((uintptr_t)ptr %
sizeof(
void*) != 0)
return false;
1641 return mmtk_is_mmtk_object((MMTk_Address)ptr);
1645rb_gc_impl_garbage_object_p(
void *objspace_ptr,
VALUE obj)
1650void rb_gc_impl_set_event_hook(
void *objspace_ptr,
const rb_event_flag_t event) { }
1653rb_gc_impl_copy_attributes(
void *objspace_ptr,
VALUE dest,
VALUE obj)
1655 if (mmtk_object_wb_unprotected_p((MMTk_ObjectReference)obj)) {
1656 rb_gc_impl_writebarrier_unprotect(objspace_ptr, dest);
1659 rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
1665rb_gc_impl_active_gc_name(
void)
#define RUBY_ATOMIC_INC(var)
Atomically increments the value pointed by var.
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
#define RUBY_ATOMIC_DEC(var)
Atomically decrements the value pointed by var.
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigge...
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for executi...
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which ...
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
uint32_t rb_event_flag_t
Represents event(s).
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
static void RB_FL_SET(VALUE obj, VALUE flags)
Sets the given flag(s).
@ RUBY_FL_FINALIZE
This flag has something to do with finalisers.
@ RUBY_FL_WEAK_REFERENCE
This object weakly refers to other objects.
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
#define T_STRING
Old name of RUBY_T_STRING.
#define xfree
Old name of ruby_xfree.
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
#define T_FLOAT
Old name of RUBY_T_FLOAT.
#define ID2SYM
Old name of RB_ID2SYM.
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
#define T_NONE
Old name of RUBY_T_NONE.
#define SIZET2NUM
Old name of RB_SIZE2NUM.
#define xmalloc
Old name of ruby_xmalloc.
#define LONG2FIX
Old name of RB_INT2FIX.
#define FIX2INT
Old name of RB_FIX2INT.
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
#define T_HASH
Old name of RUBY_T_HASH.
#define FL_SET
Old name of RB_FL_SET.
#define rb_ary_new3
Old name of rb_ary_new_from_args.
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
#define T_OBJECT
Old name of RUBY_T_OBJECT.
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
#define FL_TEST
Old name of RB_FL_TEST.
#define FL_UNSET
Old name of RB_FL_UNSET.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
#define T_REGEXP
Old name of RUBY_T_REGEXP.
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
int len
Length of the buffer.
#define RB_ULONG2NUM
Just another name of rb_ulong2num_inline.
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
#define RARRAY_LEN
Just another name of rb_array_len.
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
#define RARRAY_AREF(a, i)
#define RBASIC(obj)
Convenient casting macro.
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
void * rust_closure
The pointer to the Rust-level closure object.
MMTk_ObjectClosureFunction c_function
The function to be called from C.
Ruby object's base components.
Private header for the default GC and other GC implementations first introduced for [Feature #20470].
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
uintptr_t VALUE
Type that represents a Ruby object.
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.