Ruby 4.1.0dev (2026-05-14 revision 4c3de1a7b063c91015a54b8b125676b60d565959)
gc.c (4c3de1a7b063c91015a54b8b125676b60d565959)
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#include "ruby/internal/config.h"
15#ifdef _WIN32
16# include "ruby/ruby.h"
17#endif
18
19#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
20# include "wasm/setjmp.h"
21# include "wasm/machine.h"
22#else
23# include <setjmp.h>
24#endif
25#include <stdarg.h>
26#include <stdio.h>
27
28/* MALLOC_HEADERS_BEGIN */
29#ifndef HAVE_MALLOC_USABLE_SIZE
30# ifdef _WIN32
31# define HAVE_MALLOC_USABLE_SIZE
32# define malloc_usable_size(a) _msize(a)
33# elif defined HAVE_MALLOC_SIZE
34# define HAVE_MALLOC_USABLE_SIZE
35# define malloc_usable_size(a) malloc_size(a)
36# endif
37#endif
38
39#ifdef HAVE_MALLOC_USABLE_SIZE
40# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
41/* Alternative malloc header is included in ruby/missing.h */
42# elif defined(HAVE_MALLOC_H)
43# include <malloc.h>
44# elif defined(HAVE_MALLOC_NP_H)
45# include <malloc_np.h>
46# elif defined(HAVE_MALLOC_MALLOC_H)
47# include <malloc/malloc.h>
48# endif
49#endif
50
51/* MALLOC_HEADERS_END */
52
53#ifdef HAVE_SYS_TIME_H
54# include <sys/time.h>
55#endif
56
57#ifdef HAVE_SYS_RESOURCE_H
58# include <sys/resource.h>
59#endif
60
61#if defined _WIN32 || defined __CYGWIN__
62# include <windows.h>
63#elif defined(HAVE_POSIX_MEMALIGN)
64#elif defined(HAVE_MEMALIGN)
65# include <malloc.h>
66#endif
67
68#include <sys/types.h>
69
70#ifdef __EMSCRIPTEN__
71#include <emscripten.h>
72#endif
73
74/* For ruby_annotate_mmap */
75#ifdef HAVE_SYS_PRCTL_H
76#include <sys/prctl.h>
77#endif
78
79#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
80
81#include "constant.h"
82#include "darray.h"
83#include "debug_counter.h"
84#include "eval_intern.h"
85#include "gc/gc.h"
86#include "id_table.h"
87#include "internal.h"
88#include "internal/class.h"
89#include "internal/compile.h"
90#include "internal/complex.h"
91#include "internal/concurrent_set.h"
92#include "internal/cont.h"
93#include "internal/error.h"
94#include "internal/eval.h"
95#include "internal/gc.h"
96#include "internal/hash.h"
97#include "internal/imemo.h"
98#include "internal/io.h"
99#include "internal/numeric.h"
100#include "internal/object.h"
101#include "internal/proc.h"
102#include "internal/rational.h"
103#include "internal/re.h"
104#include "internal/sanitizers.h"
105#include "internal/struct.h"
106#include "internal/symbol.h"
107#include "internal/thread.h"
108#include "internal/variable.h"
109#include "internal/warnings.h"
110#include "probes.h"
111#include "regint.h"
112#include "ruby/debug.h"
113#include "ruby/io.h"
114#include "ruby/re.h"
115#include "ruby/st.h"
116#include "ruby/thread.h"
117#include "ruby/util.h"
118#include "ruby/vm.h"
119#include "ruby_assert.h"
120#include "ruby_atomic.h"
121#include "symbol.h"
122#include "variable.h"
123#include "vm_core.h"
124#include "vm_sync.h"
125#include "vm_callinfo.h"
126#include "ractor_core.h"
127#include "yjit.h"
128#include "zjit.h"
129
130#include "builtin.h"
131#include "shape.h"
132
133// TODO: Don't export this function in modular GC; instead, MMTk should figure out
134// how to combine GC thread backtraces with mutator thread backtraces.
135void
136rb_gc_print_backtrace(void)
137{
138 rb_print_backtrace(stderr);
139}
140
141unsigned int
142rb_gc_vm_lock(const char *file, int line)
143{
144 unsigned int lev = 0;
145 rb_vm_lock_enter(&lev, file, line);
146 return lev;
147}
148
149void
150rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
151{
152 rb_vm_lock_leave(&lev, file, line);
153}
154
155unsigned int
156rb_gc_cr_lock(const char *file, int line)
157{
158 unsigned int lev;
159 rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
160 return lev;
161}
162
163void
164rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
165{
166 rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
167}
168
169unsigned int
170rb_gc_vm_lock_no_barrier(const char *file, int line)
171{
172 unsigned int lev = 0;
173 rb_vm_lock_enter_nb(&lev, file, line);
174 return lev;
175}
176
177void
178rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
179{
180 rb_vm_lock_leave_nb(&lev, file, line);
181}
182
183void
184rb_gc_vm_barrier(void)
185{
186 rb_vm_barrier();
187}
188
189void *
190rb_gc_get_ractor_newobj_cache(void)
191{
192 return GET_RACTOR()->newobj_cache;
193}
194
195void
196rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
197{
198 rb_native_mutex_initialize(&context->lock);
199 context->ec = GET_EC();
200}
201
202bool
203rb_gc_event_hook_required_p(rb_event_flag_t event)
204{
205 return ruby_vm_event_flags & event;
206}
207
208void
209rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
210{
211 if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
212
213 rb_execution_context_t *ec = rb_gc_get_ec();
214 if (!ec->cfp) return;
215
216#if USE_MODULAR_GC
217 bool gc_thread_p = false;
218 if (!GET_EC()) {
219 gc_thread_p = true;
220
221# ifdef RB_THREAD_LOCAL_SPECIFIER
222 rb_current_ec_set(ec);
223# else
224 native_tls_set(ruby_current_ec_key, ec);
225# endif
226 }
227#endif
228
229 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
230
231#if USE_MODULAR_GC
232 if (gc_thread_p) {
233# ifdef RB_THREAD_LOCAL_SPECIFIER
234 rb_current_ec_set(NULL);
235# else
236 native_tls_set(ruby_current_ec_key, NULL);
237# endif
238 }
239#endif
240}
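
/* Example (illustrative): the internal events fired through this hook can be
 * consumed from C with rb_tracepoint_new(), e.g. to count allocations. The
 * callback runs at allocation time, so it must not allocate Ruby objects
 * itself; heavier work belongs in a postponed job.
 *
 *     static void
 *     count_newobj_i(VALUE tpval, void *data)
 *     {
 *         size_t *counter = (size_t *)data;
 *         (*counter)++;
 *     }
 *
 *     // VALUE tp = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ,
 *     //                              count_newobj_i, &counter);
 *     // rb_tracepoint_enable(tp);
 */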
241
242void *
243rb_gc_get_objspace(void)
244{
245 return GET_VM()->gc.objspace;
246}
247
248void
249rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
250{
251 rb_ractor_t *r = NULL;
252 if (RB_LIKELY(ruby_single_main_ractor)) {
253 GC_ASSERT(
254 ccan_list_empty(&GET_VM()->ractor.set) ||
255 (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
256 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
257 );
258
259 func(ruby_single_main_ractor->newobj_cache, data);
260 }
261 else {
262 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
263 func(r->newobj_cache, data);
264 }
265 }
266}
267
268void
269rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
270{
271 volatile struct {
272 VALUE errinfo;
273 VALUE final;
274 rb_control_frame_t *cfp;
275 VALUE *sp;
276 long finished;
277 } saved;
278
279 rb_execution_context_t * volatile ec = GET_EC();
280#define RESTORE_FINALIZER() (\
281 ec->cfp = saved.cfp, \
282 ec->cfp->sp = saved.sp, \
283 ec->errinfo = saved.errinfo)
284
285 saved.errinfo = ec->errinfo;
286 saved.cfp = ec->cfp;
287 saved.sp = ec->cfp->sp;
288 saved.finished = 0;
289 saved.final = Qundef;
290
291 ASSERT_vm_unlocking();
292 rb_ractor_ignore_belonging(true);
293 EC_PUSH_TAG(ec);
294 enum ruby_tag_type state = EC_EXEC_TAG();
295 if (state != TAG_NONE) {
296 ++saved.finished; /* skip failed finalizer */
297
298 VALUE failed_final = saved.final;
299 saved.final = Qundef;
300 if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
301 rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
302 rb_ec_error_print(ec, ec->errinfo);
303 }
304 }
305
306 for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
307 saved.final = callback(i, data);
308 rb_check_funcall(saved.final, idCall, 1, &objid);
309 }
310 EC_POP_TAG();
311 rb_ractor_ignore_belonging(false);
312#undef RESTORE_FINALIZER
313}
314
315void
316rb_gc_set_pending_interrupt(void)
317{
318 rb_execution_context_t *ec = GET_EC();
319 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
320}
321
322void
323rb_gc_unset_pending_interrupt(void)
324{
325 rb_execution_context_t *ec = GET_EC();
326 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
327}
328
329bool
330rb_gc_multi_ractor_p(void)
331{
332 return rb_multi_ractor_p();
333}
334
335bool
336rb_gc_shutdown_call_finalizer_p(VALUE obj)
337{
338 switch (BUILTIN_TYPE(obj)) {
339 case T_DATA:
340 if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
341 if (rb_obj_is_thread(obj)) return false;
342 if (rb_obj_is_mutex(obj)) return false;
343 if (rb_obj_is_fiber(obj)) return false;
344 if (rb_ractor_p(obj)) return false;
345 if (rb_obj_is_fstring_table(obj)) return false;
346 if (rb_obj_is_symbol_table(obj)) return false;
347
348 return true;
349
350 case T_FILE:
351 return true;
352
353 case T_SYMBOL:
354 return true;
355
356 case T_NONE:
357 return false;
358
359 default:
360 return ruby_free_at_exit_p();
361 }
362}
363
364void
365rb_gc_obj_changed_pool(VALUE obj, size_t heap_id)
366{
368
369 RBASIC_SET_SHAPE_ID(obj, rb_obj_shape_transition_heap(obj, heap_id));
370}
371
372void rb_vm_update_references(void *ptr);
373
374#define rb_setjmp(env) RUBY_SETJMP(env)
375#define rb_jmp_buf rb_jmpbuf_t
376#undef rb_data_object_wrap
377
378#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
379#define MAP_ANONYMOUS MAP_ANON
380#endif
381
382#define unless_objspace(objspace) \
383 void *objspace; \
384 rb_vm_t *unless_objspace_vm = GET_VM(); \
385 if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
386 else /* return; or objspace will be warned uninitialized */
387
388#define RMOVED(obj) ((struct RMoved *)(obj))
389
390#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
391 if (gc_object_moved_p_internal((_objspace), (VALUE)(_thing))) { \
392 *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
393 } \
394} while (0)
395
396#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
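
/* Example (illustrative): compaction-aware update functions walk every
 * reference an object holds and rewrite the ones whose targets have moved.
 * struct my_node is hypothetical; the macro expands to a
 * gc_object_moved_p_internal() check followed by a gc_location_internal()
 * lookup.
 *
 *     struct my_node { VALUE key; VALUE val; };
 *
 *     static void
 *     my_node_update_references(void *objspace, struct my_node *node)
 *     {
 *         UPDATE_IF_MOVED(objspace, node->key);
 *         UPDATE_IF_MOVED(objspace, node->val);
 *     }
 */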
397
398#if RUBY_MARK_FREE_DEBUG
399int ruby_gc_debug_indent = 0;
400#endif
401
402#ifndef RGENGC_OBJ_INFO
403# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
404#endif
405
406#ifndef CALC_EXACT_MALLOC_SIZE
407# define CALC_EXACT_MALLOC_SIZE 0
408#endif
409
411
412static size_t malloc_offset = 0;
413#if defined(HAVE_MALLOC_USABLE_SIZE)
414static size_t
415gc_compute_malloc_offset(void)
416{
417 // Different allocators use different metadata storage strategies which result in different
418 // ideal sizes.
419 // For instance, malloc(64) will waste 8B with glibc, but 0B with jemalloc,
420 // while malloc(56) will waste 0B with glibc, but 8B with jemalloc.
421 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
422 // waste memory.
423 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
424 // no wasted memory.
425 size_t offset = 0;
426 for (offset = 0; offset <= 16; offset += 8) {
427 size_t allocated = (64 - offset);
428 void *test_ptr = malloc(allocated);
429 size_t wasted = malloc_usable_size(test_ptr) - allocated;
430 free(test_ptr);
431
432 if (wasted == 0) {
433 return offset;
434 }
435 }
436 return 0;
437}
438#else
439static size_t
440gc_compute_malloc_offset(void)
441{
442 // If we don't have malloc_usable_size, we use powers of 2.
443 return 0;
444}
445#endif
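
/* Illustrative probe sequence for gc_compute_malloc_offset() above, assuming
 * a glibc-style allocator with an 8-byte chunk header:
 *
 *     offset 0: malloc(64) -> usable 72 -> 8 bytes wasted
 *     offset 8: malloc(56) -> usable 56 -> 0 bytes wasted  => returns 8
 *
 * Under jemalloc (assumption), the first probe already wastes nothing
 * (malloc(64) -> usable 64), so the function returns 0.
 */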
446
447size_t
448rb_malloc_grow_capa(size_t current, size_t type_size)
449{
450 size_t current_capacity = current;
451 if (current_capacity < 4) {
452 current_capacity = 4;
453 }
454 current_capacity *= type_size;
455
456 // We double the current capacity.
457 size_t new_capacity = (current_capacity * 2);
458
459 // And round up to the next power of 2 if it's not already one.
460 if (rb_popcount64(new_capacity) != 1) {
461 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
462 }
463
464 new_capacity -= malloc_offset;
465 new_capacity /= type_size;
466 if (current > new_capacity) {
467 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
468 }
469 RUBY_ASSERT(new_capacity > current);
470 return new_capacity;
471}
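
/* Worked example (assuming malloc_offset == 8, the typical glibc result):
 *
 *     rb_malloc_grow_capa(10, sizeof(VALUE))  // type_size == 8
 *         current_capacity = 80               // 10 * 8
 *         new_capacity     = 160              // doubled
 *         new_capacity     = 256              // next power of 2
 *         new_capacity     = 248              // minus malloc_offset
 *         return 31;                          // 248 / 8 elements
 *
 * malloc(31 * 8) == malloc(248) then fills a 256-byte chunk exactly once the
 * allocator's 8-byte header is added, wasting nothing.
 */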
472
473static inline struct rbimpl_size_overflow_tag
474size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
475{
476 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
477 struct rbimpl_size_overflow_tag u = rbimpl_size_add_overflow(t.result, z);
478 return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed, u.result };
479}
480
481static inline struct rbimpl_size_overflow_tag
482size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
483{
484 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
485 struct rbimpl_size_overflow_tag u = rbimpl_size_mul_overflow(z, w);
486 struct rbimpl_size_overflow_tag v = rbimpl_size_add_overflow(t.result, u.result);
487 return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed || v.overflowed, v.result };
488}
489
490PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
491
492static inline size_t
493size_mul_or_raise(size_t x, size_t y, VALUE exc)
494{
495 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
496 if (LIKELY(!t.overflowed)) {
497 return t.result;
498 }
499 else if (rb_during_gc()) {
500 rb_memerror(); /* or...? */
501 }
502 else {
503 gc_raise(
504 exc,
505 "integer overflow: %"PRIuSIZE
506 " * %"PRIuSIZE
507 " > %"PRIuSIZE,
508 x, y, (size_t)SIZE_MAX);
509 }
510}
511
512size_t
513rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
514{
515 return size_mul_or_raise(x, y, exc);
516}
517
518static inline size_t
519size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
520{
521 struct rbimpl_size_overflow_tag t = size_mul_add_overflow(x, y, z);
522 if (LIKELY(!t.overflowed)) {
523 return t.result;
524 }
525 else if (rb_during_gc()) {
526 rb_memerror(); /* or...? */
527 }
528 else {
529 gc_raise(
530 exc,
531 "integer overflow: %"PRIuSIZE
532 " * %"PRIuSIZE
533 " + %"PRIuSIZE
534 " > %"PRIuSIZE,
535 x, y, z, (size_t)SIZE_MAX);
536 }
537}
538
539size_t
540rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
541{
542 return size_mul_add_or_raise(x, y, z, exc);
543}
544
545static inline size_t
546size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
547{
548 struct rbimpl_size_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
549 if (LIKELY(!t.overflowed)) {
550 return t.result;
551 }
552 else if (rb_during_gc()) {
553 rb_memerror(); /* or...? */
554 }
555 else {
556 gc_raise(
557 exc,
558 "integer overflow: %"PRIdSIZE
559 " * %"PRIdSIZE
560 " + %"PRIdSIZE
561 " * %"PRIdSIZE
562 " > %"PRIdSIZE,
563 x, y, z, w, (size_t)SIZE_MAX);
564 }
565}
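
/* Example (illustrative): allocation-size arithmetic on untrusted lengths is
 * expected to go through these helpers so that overflow raises instead of
 * silently wrapping. The layout below is hypothetical:
 *
 *     // header + len elements, or raises rb_eArgError on overflow
 *     size_t bytes = size_mul_add_or_raise(len, sizeof(VALUE),
 *                                          sizeof(struct RBasic), rb_eArgError);
 *     void *buf = ruby_xmalloc(bytes);
 */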
566
567#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
568/* trick the compiler into thinking an external signal handler uses this */
569volatile VALUE rb_gc_guarded_val;
570volatile VALUE *
571rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
572{
573 rb_gc_guarded_val = val;
574
575 return ptr;
576}
577#endif
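
/* Example (illustrative): RB_GC_GUARD() lowers to this helper (or to an asm
 * barrier) so that a VALUE stays visible to the conservative stack scan even
 * after its last real use. use_buffer() is a stand-in for code that only
 * touches the raw pointer:
 *
 *     VALUE str = rb_str_new_cstr("payload");
 *     char *p = RSTRING_PTR(str);
 *     use_buffer(p);        // str itself is no longer referenced here...
 *     RB_GC_GUARD(str);     // ...so keep it alive until at least this point
 */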
578
579static const char *obj_type_name(VALUE obj);
580static st_table *id2ref_tbl;
581#include "gc/default/default.c"
582
583#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
584# error "Modular GC requires dlopen"
585#elif USE_MODULAR_GC
586#include <dlfcn.h>
587
588typedef struct gc_function_map {
589 // Bootup
590 void *(*objspace_alloc)(void);
591 void (*objspace_init)(void *objspace_ptr);
592 void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
593 void (*set_params)(void *objspace_ptr);
594 void (*init)(void);
595 size_t *(*heap_sizes)(void *objspace_ptr);
596 // Shutdown
597 void (*shutdown_free_objects)(void *objspace_ptr);
598 void (*objspace_free)(void *objspace_ptr);
599 void (*ractor_cache_free)(void *objspace_ptr, void *cache);
600 // GC
601 void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
602 bool (*during_gc_p)(void *objspace_ptr);
603 void (*prepare_heap)(void *objspace_ptr);
604 void (*gc_enable)(void *objspace_ptr);
605 void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
606 bool (*gc_enabled_p)(void *objspace_ptr);
607 VALUE (*config_get)(void *objspace_ptr);
608 void (*config_set)(void *objspace_ptr, VALUE hash);
609 void (*stress_set)(void *objspace_ptr, VALUE flag);
610 VALUE (*stress_get)(void *objspace_ptr);
611 struct rb_gc_vm_context *(*get_vm_context)(void *objspace_ptr);
612 // Object allocation
613 VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
614 size_t (*obj_slot_size)(VALUE obj);
615 size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
616 bool (*size_allocatable_p)(size_t size);
617 // Malloc
618 void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
619 void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
620 void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
621 void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
622 void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
623 // Marking
624 void (*mark)(void *objspace_ptr, VALUE obj);
625 void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
626 void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
627 void (*mark_maybe)(void *objspace_ptr, VALUE obj);
628 // Weak references
629 void (*declare_weak_references)(void *objspace_ptr, VALUE obj);
630 bool (*handle_weak_references_alive_p)(void *objspace_ptr, VALUE obj);
631 // Compaction
632 void (*register_pinning_obj)(void *objspace_ptr, VALUE obj);
633 bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
634 VALUE (*location)(void *objspace_ptr, VALUE value);
635 // Write barriers
636 void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
637 void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
638 void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
639 // Heap walking
640 void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
641 void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
642 // Finalizers
643 void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
644 VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
645 void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
646 void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
647 void (*shutdown_call_finalizer)(void *objspace_ptr);
648 // Forking
649 void (*before_fork)(void *objspace_ptr);
650 void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
651 // Statistics
652 void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
653 bool (*get_measure_total_time)(void *objspace_ptr);
654 unsigned long long (*get_total_time)(void *objspace_ptr);
655 size_t (*gc_count)(void *objspace_ptr);
656 VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
657 VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
658 VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
659 const char *(*active_gc_name)(void);
660 // Miscellaneous
661 struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
662 bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
663 bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
664 void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
665 void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
666
667 bool modular_gc_loaded_p;
668} rb_gc_function_map_t;
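
/* Example (illustrative): a modular GC ships each of these entry points as an
 * exported symbol named "rb_gc_impl_" + field name, which
 * ruby_modular_gc_init() below resolves via dlsym(). A minimal library would
 * define, e.g. (struct mygc_objspace is hypothetical):
 *
 *     const char *
 *     rb_gc_impl_active_gc_name(void)
 *     {
 *         return "mygc";
 *     }
 *
 *     size_t
 *     rb_gc_impl_gc_count(void *objspace_ptr)
 *     {
 *         return ((struct mygc_objspace *)objspace_ptr)->gc_count;
 *     }
 */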
669
670static rb_gc_function_map_t rb_gc_functions;
671
672# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
673# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)
674
675static void
676ruby_modular_gc_init(void)
677{
678 // Assert that the directory path ends with a /
679 RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
680
681 const char *gc_so_file = getenv(RUBY_GC_LIBRARY);
682
683 rb_gc_function_map_t gc_functions = { 0 };
684
685 char *gc_so_path = NULL;
686 void *handle = NULL;
687 if (gc_so_file) {
688 /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
689 * not load a shared object outside of the directory. */
690 for (size_t i = 0; i < strlen(gc_so_file); i++) {
691 char c = gc_so_file[i];
692 if (isalnum(c)) continue;
693 switch (c) {
694 case '-':
695 case '_':
696 break;
697 default:
698 fprintf(stderr, "Only alphanumeric, dash, and underscore is allowed in "RUBY_GC_LIBRARY"\n");
699 exit(EXIT_FAILURE);
700 }
701 }
702
703 size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
704#ifdef LOAD_RELATIVE
705 Dl_info dli;
706 size_t prefix_len = 0;
707 if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
708 const char *base = strrchr(dli.dli_fname, '/');
709 if (base) {
710 size_t tail = 0;
711# define end_with_p(lit) \
712 (prefix_len >= (tail = rb_strlen_lit(lit)) && \
713 memcmp(base - tail, lit, tail) == 0)
714
715 prefix_len = base - dli.dli_fname;
716 if (end_with_p("/bin") || end_with_p("/lib")) {
717 prefix_len -= tail;
718 }
719 prefix_len += MODULAR_GC_DIR[0] != '/';
720 gc_so_path_size += prefix_len;
721 }
722 }
723#endif
724 gc_so_path = alloca(gc_so_path_size);
725 {
726 size_t gc_so_path_idx = 0;
727#define GC_SO_PATH_APPEND(str) do { \
728 gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
729} while (0)
730#ifdef LOAD_RELATIVE
731 if (prefix_len > 0) {
732 memcpy(gc_so_path, dli.dli_fname, prefix_len);
733 gc_so_path_idx = prefix_len;
734 }
735#endif
736 GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
737 GC_SO_PATH_APPEND(gc_so_file);
738 GC_SO_PATH_APPEND(DLEXT);
739 GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
740#undef GC_SO_PATH_APPEND
741 }
742
743 handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
744 if (!handle) {
745 fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
746 exit(EXIT_FAILURE);
747 }
748
749 gc_functions.modular_gc_loaded_p = true;
750 }
751
752 unsigned int err_count = 0;
753
754# define load_modular_gc_func(name) do { \
755 if (handle) { \
756 const char *func_name = "rb_gc_impl_" #name; \
757 gc_functions.name = dlsym(handle, func_name); \
758 if (!gc_functions.name) { \
759 fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
760 err_count++; \
761 } \
762 } \
763 else { \
764 gc_functions.name = rb_gc_impl_##name; \
765 } \
766} while (0)
767
768 // Bootup
769 load_modular_gc_func(objspace_alloc);
770 load_modular_gc_func(objspace_init);
771 load_modular_gc_func(ractor_cache_alloc);
772 load_modular_gc_func(set_params);
773 load_modular_gc_func(init);
774 load_modular_gc_func(heap_sizes);
775 // Shutdown
776 load_modular_gc_func(shutdown_free_objects);
777 load_modular_gc_func(objspace_free);
778 load_modular_gc_func(ractor_cache_free);
779 // GC
780 load_modular_gc_func(start);
781 load_modular_gc_func(during_gc_p);
782 load_modular_gc_func(prepare_heap);
783 load_modular_gc_func(gc_enable);
784 load_modular_gc_func(gc_disable);
785 load_modular_gc_func(gc_enabled_p);
786 load_modular_gc_func(config_set);
787 load_modular_gc_func(config_get);
788 load_modular_gc_func(stress_set);
789 load_modular_gc_func(stress_get);
790 load_modular_gc_func(get_vm_context);
791 // Object allocation
792 load_modular_gc_func(new_obj);
793 load_modular_gc_func(obj_slot_size);
794 load_modular_gc_func(heap_id_for_size);
795 load_modular_gc_func(size_allocatable_p);
796 // Malloc
797 load_modular_gc_func(malloc);
798 load_modular_gc_func(calloc);
799 load_modular_gc_func(realloc);
800 load_modular_gc_func(free);
801 load_modular_gc_func(adjust_memory_usage);
802 // Marking
803 load_modular_gc_func(mark);
804 load_modular_gc_func(mark_and_move);
805 load_modular_gc_func(mark_and_pin);
806 load_modular_gc_func(mark_maybe);
807 // Weak references
808 load_modular_gc_func(declare_weak_references);
809 load_modular_gc_func(handle_weak_references_alive_p);
810 // Compaction
811 load_modular_gc_func(register_pinning_obj);
812 load_modular_gc_func(object_moved_p);
813 load_modular_gc_func(location);
814 // Write barriers
815 load_modular_gc_func(writebarrier);
816 load_modular_gc_func(writebarrier_unprotect);
817 load_modular_gc_func(writebarrier_remember);
818 // Heap walking
819 load_modular_gc_func(each_objects);
820 load_modular_gc_func(each_object);
821 // Finalizers
822 load_modular_gc_func(make_zombie);
823 load_modular_gc_func(define_finalizer);
824 load_modular_gc_func(undefine_finalizer);
825 load_modular_gc_func(copy_finalizer);
826 load_modular_gc_func(shutdown_call_finalizer);
827 // Forking
828 load_modular_gc_func(before_fork);
829 load_modular_gc_func(after_fork);
830 // Statistics
831 load_modular_gc_func(set_measure_total_time);
832 load_modular_gc_func(get_measure_total_time);
833 load_modular_gc_func(get_total_time);
834 load_modular_gc_func(gc_count);
835 load_modular_gc_func(latest_gc_info);
836 load_modular_gc_func(stat);
837 load_modular_gc_func(stat_heap);
838 load_modular_gc_func(active_gc_name);
839 // Miscellaneous
840 load_modular_gc_func(object_metadata);
841 load_modular_gc_func(pointer_to_heap_p);
842 load_modular_gc_func(garbage_object_p);
843 load_modular_gc_func(set_event_hook);
844 load_modular_gc_func(copy_attributes);
845
846 if (err_count > 0) {
847 fprintf(stderr, "ruby_modular_gc_init: found %u missing exports in library %s\n", err_count, gc_so_path);
848 exit(EXIT_FAILURE);
849 }
850
851# undef load_modular_gc_func
852
853 rb_gc_functions = gc_functions;
854}
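
/* Usage (illustrative): with a library built as
 * <modular_gc_dir>/librubygc.mygc.<DLEXT>, selecting it at startup is just:
 *
 *     $ RUBY_GC_LIBRARY=mygc ruby script.rb
 *
 * Names are restricted to [A-Za-z0-9_-] before the path is assembled, so the
 * environment variable cannot point outside the modular GC directory.
 */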
855
856// Bootup
857# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
858# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
859# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
860# define rb_gc_impl_set_params rb_gc_functions.set_params
861# define rb_gc_impl_init rb_gc_functions.init
862# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
863// Shutdown
864# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
865# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
866# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
867// GC
868# define rb_gc_impl_start rb_gc_functions.start
869# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
870# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
871# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
872# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
873# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
874# define rb_gc_impl_config_get rb_gc_functions.config_get
875# define rb_gc_impl_config_set rb_gc_functions.config_set
876# define rb_gc_impl_stress_set rb_gc_functions.stress_set
877# define rb_gc_impl_stress_get rb_gc_functions.stress_get
878# define rb_gc_impl_get_vm_context rb_gc_functions.get_vm_context
879// Object allocation
880# define rb_gc_impl_new_obj rb_gc_functions.new_obj
881# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
882# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
883# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
884// Malloc
885# define rb_gc_impl_malloc rb_gc_functions.malloc
886# define rb_gc_impl_calloc rb_gc_functions.calloc
887# define rb_gc_impl_realloc rb_gc_functions.realloc
888# define rb_gc_impl_free rb_gc_functions.free
889# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
890// Marking
891# define rb_gc_impl_mark rb_gc_functions.mark
892# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
893# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
894# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
895// Weak references
896# define rb_gc_impl_declare_weak_references rb_gc_functions.declare_weak_references
897# define rb_gc_impl_handle_weak_references_alive_p rb_gc_functions.handle_weak_references_alive_p
898// Compaction
899# define rb_gc_impl_register_pinning_obj rb_gc_functions.register_pinning_obj
900# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
901# define rb_gc_impl_location rb_gc_functions.location
902// Write barriers
903# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
904# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
905# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
906// Heap walking
907# define rb_gc_impl_each_objects rb_gc_functions.each_objects
908# define rb_gc_impl_each_object rb_gc_functions.each_object
909// Finalizers
910# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
911# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
912# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
913# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
914# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
915// Forking
916# define rb_gc_impl_before_fork rb_gc_functions.before_fork
917# define rb_gc_impl_after_fork rb_gc_functions.after_fork
918// Statistics
919# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
920# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
921# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
922# define rb_gc_impl_gc_count rb_gc_functions.gc_count
923# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
924# define rb_gc_impl_stat rb_gc_functions.stat
925# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
926# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
927// Miscellaneous
928# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
929# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
930# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
931# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
932# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
933#endif
934
935#ifdef RUBY_ASAN_ENABLED
936static void
937asan_death_callback(void)
938{
939 if (GET_VM()) {
940 rb_bug_without_die("ASAN error");
941 }
942}
943#endif
944
945static VALUE initial_stress = Qfalse;
946
947void *
948rb_objspace_alloc(void)
949{
950#if USE_MODULAR_GC
951 ruby_modular_gc_init();
952#endif
953
954 void *objspace = rb_gc_impl_objspace_alloc();
955 ruby_current_vm_ptr->gc.objspace = objspace;
956 rb_gc_impl_objspace_init(objspace);
957 rb_gc_impl_stress_set(objspace, initial_stress);
958
959#ifdef RUBY_ASAN_ENABLED
960 __sanitizer_set_death_callback(asan_death_callback);
961#endif
962
963 return objspace;
964}
965
966void
967rb_objspace_free(void *objspace)
968{
969 rb_gc_impl_objspace_free(objspace);
970}
971
972size_t
973rb_gc_obj_slot_size(VALUE obj)
974{
975 return rb_gc_impl_obj_slot_size(obj);
976}
977
978static inline void
979gc_validate_pc(VALUE obj)
980{
981#if RUBY_DEBUG
982 // IMEMOs and objects without a class (e.g. the managed ID table) are not traceable
983 if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;
984
985 rb_execution_context_t *ec = GET_EC();
986 const rb_control_frame_t *cfp = ec->cfp;
987 if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && CFP_PC(cfp)) {
988 const VALUE *iseq_encoded = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded;
989 const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(CFP_ISEQ(cfp))->iseq_size;
990 RUBY_ASSERT(CFP_PC(cfp) >= iseq_encoded, "PC not set when allocating, breaking tracing");
991 RUBY_ASSERT(CFP_PC(cfp) <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
992 }
993#endif
994}
995
996NOINLINE(static void gc_newobj_hook(VALUE obj));
997static void
998gc_newobj_hook(VALUE obj)
999{
1000 int lev = RB_GC_VM_LOCK_NO_BARRIER();
1001 {
1002 size_t slot_size = rb_gc_obj_slot_size(obj);
1003 memset((char *)obj + sizeof(struct RBasic), 0, slot_size - sizeof(struct RBasic));
1004
1005 /* We must disable GC here because the callback could call xmalloc,
1006 * which could potentially trigger a GC. A lot of code is unsafe
1007 * to trigger a GC right after an object has been allocated, because
1008 * it performs initialization for the object and assumes that the
1009 * GC does not trigger before then. */
1010 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1011 {
1012 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
1013 }
1014 if (!gc_disabled) rb_gc_enable();
1015 }
1016 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
1017}
1018
1019VALUE
1020rb_newobj(rb_execution_context_t *ec, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t size)
1021{
1022 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1023 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
1024 VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);
1025 RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);
1026
1027 gc_validate_pc(obj);
1028
1029 if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
1030 gc_newobj_hook(obj);
1031 }
1032
1033#if RGENGC_CHECK_MODE
1034# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
1035# define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
1036# endif
1037
1038 memset(
1039 (void *)(obj + sizeof(struct RBasic)),
1040 GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
1041 rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
1042 );
1043#endif
1044
1045 return obj;
1046}
1047
1048VALUE
1049rb_ec_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
1050{
1051 return rb_newobj(ec, klass, flags, ROOT_SHAPE_ID, true, size);
1052}
1053
1054VALUE
1055rb_newobj_of_with_shape(VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
1056{
1057 return rb_newobj(GET_EC(), klass, flags, shape_id, true, size);
1058}
1059
1060VALUE
1061rb_newobj_of(VALUE klass, VALUE flags, size_t size)
1062{
1063 return rb_newobj(GET_EC(), klass, flags, ROOT_SHAPE_ID, true, size);
1064}
1065
1066static VALUE
1067class_allocate_complex_instance(VALUE klass, uint32_t capacity)
1068{
1069 shape_id_t initial_shape_id = rb_shape_root(rb_gc_heap_id_for_size(sizeof(struct RObject)));
1070 VALUE obj = rb_newobj_of_with_shape(klass, T_OBJECT, initial_shape_id, sizeof(struct RObject));
1071 rb_obj_init_complex(obj, rb_st_init_numtable_with_size(capacity));
1072 return obj;
1073}
1074
1075VALUE
1076rb_class_allocate_instance(VALUE klass)
1077{
1078 uint32_t index_tbl_num_entries = RCLASS_MAX_IV_COUNT(klass);
1079 VALUE obj;
1080
1081 // Directly start as COMPLEX if we know we're over the limit.
1082 RUBY_ASSERT(rb_shape_tree.max_capacity > 0);
1083 if (RB_UNLIKELY(index_tbl_num_entries > rb_shape_tree.max_capacity)) {
1084 obj = class_allocate_complex_instance(klass, index_tbl_num_entries);
1085 }
1086 else {
1087 size_t size = rb_obj_embedded_size(index_tbl_num_entries);
1088 if (!rb_gc_size_allocatable_p(size)) {
1089 size = sizeof(struct RObject);
1090 }
1091
1092 // There might be a NEWOBJ tracepoint callback, and it may set fields.
1093 // So the shape must be passed to `NEWOBJ_OF`.
1094 obj = rb_newobj_of_with_shape(klass, T_OBJECT, rb_shape_root(rb_gc_heap_id_for_size(size)), size);
1095
1096 #if RUBY_DEBUG
1097 VALUE *ptr = ROBJECT_FIELDS(obj);
1098 size_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
1099 for (size_t i = fields_count; i < ROBJECT_FIELDS_CAPACITY(obj); i++) {
1100 ptr[i] = Qundef;
1101 }
1102 #endif
1103 }
1104
1105#if RUBY_DEBUG
1106 if (rb_obj_class(obj) != rb_class_real(klass)) {
1107 rb_bug("Expected rb_class_allocate_instance to set the class correctly");
1108 }
1109#endif
1110
1111 return obj;
1112}
1113
1114void
1115rb_gc_register_pinning_obj(VALUE obj)
1116{
1117 rb_gc_impl_register_pinning_obj(rb_gc_get_objspace(), obj);
1118}
1119
1120#define UNEXPECTED_NODE(func) \
1121 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1122 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1123
1124static inline void
1125rb_data_object_check(VALUE klass)
1126{
1127 RUBY_ASSERT(!RCLASS_SINGLETON_P(klass));
1128 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1129 rb_undef_alloc_func(klass);
1130 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1131 }
1132}
1133
1134VALUE
1135rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1136{
1138 if (klass) rb_data_object_check(klass);
1139 VALUE obj = rb_newobj(GET_EC(), klass, T_DATA, ROOT_SHAPE_ID, !dmark, sizeof(struct RTypedData));
1140
1141 rb_gc_register_pinning_obj(obj);
1142
1143 struct RData *data = (struct RData *)obj;
1144 data->dmark = dmark;
1145 data->dfree = dfree;
1146 data->data = datap;
1147
1148 return obj;
1149}
1150
1151VALUE
1152rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1153{
1154 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1155 DATA_PTR(obj) = xcalloc(1, size);
1156 return obj;
1157}
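
/* Example (illustrative): classic untyped Data wrapping from a C extension.
 * struct counter is hypothetical; RUBY_DEFAULT_FREE releases the payload with
 * xfree() as handled in rb_data_free() below.
 *
 *     struct counter { long hits; };
 *
 *     static VALUE
 *     counter_alloc(VALUE klass)
 *     {
 *         return rb_data_object_zalloc(klass, sizeof(struct counter),
 *                                      NULL, RUBY_DEFAULT_FREE);
 *     }
 *
 * New code should generally prefer the TypedData variants below, which attach
 * a rb_data_type_t describing mark/free/size semantics.
 */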
1158
1159#define RTYPEDDATA_EMBEDDED_P rbimpl_typeddata_embedded_p
1160#define RB_DATA_TYPE_EMBEDDABLE_P(type) ((type)->flags & RUBY_TYPED_EMBEDDABLE)
1161#define RTYPEDDATA_EMBEDDABLE_P(obj) RB_DATA_TYPE_EMBEDDABLE_P(RTYPEDDATA_TYPE(obj))
1162
1163static VALUE
1164typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1165{
1166 RBIMPL_NONNULL_ARG(type);
1167 if (klass) rb_data_object_check(klass);
1168 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1169 VALUE obj = rb_newobj(GET_EC(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, ROOT_SHAPE_ID, wb_protected, size);
1170
1171 rb_gc_register_pinning_obj(obj);
1172
1173 struct RTypedData *data = (struct RTypedData *)obj;
1174 data->fields_obj = 0;
1175 *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
1176 data->data = datap;
1177
1178 return obj;
1179}
1180
1181VALUE
1182rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1183{
1184 if (UNLIKELY(RB_DATA_TYPE_EMBEDDABLE_P(type))) {
1185 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1186 }
1187
1188 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1189}
1190
1191VALUE
1192rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1193{
1194 if (RB_DATA_TYPE_EMBEDDABLE_P(type)) {
1195 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1196 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1197 }
1198
1199 size_t embed_size = offsetof(struct RTypedData, data) + size;
1200 if (rb_gc_size_allocatable_p(embed_size)) {
1201 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1202 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1203 return obj;
1204 }
1205 }
1206
1207 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1208 DATA_PTR(obj) = xcalloc(1, size);
1209 return obj;
1210}
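
/* Example (illustrative): an embeddable TypedData. RUBY_TYPED_EMBEDDABLE lets
 * the payload live inside the object slot when it fits, and, as enforced
 * above, requires RUBY_TYPED_FREE_IMMEDIATELY. struct point is hypothetical:
 *
 *     struct point { double x, y; };
 *
 *     static const rb_data_type_t point_type = {
 *         .wrap_struct_name = "point",
 *         .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
 *         .flags = RUBY_TYPED_EMBEDDABLE | RUBY_TYPED_FREE_IMMEDIATELY |
 *                  RUBY_TYPED_WB_PROTECTED,
 *     };
 *
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct point),
 *                                             &point_type);
 */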
1211
1212static size_t
1213ruby_xmalloc_usable_size(void *ptr)
1214{
1215#ifdef HAVE_MALLOC_USABLE_SIZE
1216#if CALC_EXACT_MALLOC_SIZE
1217 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
1218 return malloc_usable_size(info) - sizeof(struct malloc_obj_info);
1219#else
1220 return malloc_usable_size(ptr);
1221#endif
1222#else
1223 return 0;
1224#endif
1225}
1226
1227static size_t
1228rb_objspace_data_type_memsize(VALUE obj)
1229{
1230 size_t size = 0;
1231 if (RTYPEDDATA_P(obj)) {
1232 const void *ptr = RTYPEDDATA_GET_DATA(obj);
1233
1234 if (ptr) {
1235 if (RTYPEDDATA_EMBEDDABLE_P(obj) && !RTYPEDDATA_EMBEDDED_P(obj)) {
1236 size += ruby_xmalloc_usable_size((void *)ptr);
1237 }
1238
1239 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1240 if (type->function.dsize) {
1241 size += type->function.dsize(ptr);
1242 }
1243 }
1244 }
1245
1246 return size;
1247}
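
/* Example (illustrative): the dsize callback consulted above reports the
 * out-of-line memory owned by the wrapped struct, in bytes. struct point_list
 * is hypothetical; ObjectSpace.memsize_of (objspace extension) folds this
 * value into its result.
 *
 *     static size_t
 *     point_list_memsize(const void *ptr)
 *     {
 *         const struct point_list *list = ptr;
 *         return sizeof(*list) + list->capa * sizeof(struct point);
 *     }
 */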
1248
1249const char *
1250rb_objspace_data_type_name(VALUE obj)
1251{
1252 if (RTYPEDDATA_P(obj)) {
1253 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1254 }
1255 else {
1256 return 0;
1257 }
1258}
1259
1260void
1261rb_gc_declare_weak_references(VALUE obj)
1262{
1263 rb_gc_impl_declare_weak_references(rb_gc_get_objspace(), obj);
1264}
1265
1266bool
1267rb_gc_handle_weak_references_alive_p(VALUE obj)
1268{
1269 if (SPECIAL_CONST_P(obj)) return true;
1270
1271 return rb_gc_impl_handle_weak_references_alive_p(rb_gc_get_objspace(), obj);
1272}
1273
1274void
1275rb_gc_handle_weak_references(VALUE obj)
1276{
1277 switch (BUILTIN_TYPE(obj)) {
1278 case T_DATA:
1279 if (RTYPEDDATA_P(obj)) {
1280 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1281
1282 if (type->function.handle_weak_references) {
1283 (type->function.handle_weak_references)(RTYPEDDATA_GET_DATA(obj));
1284 }
1285 else {
1286 rb_bug(
1287 "rb_gc_handle_weak_references: TypedData %s does not implement handle_weak_references",
1288 RTYPEDDATA_TYPE(obj)->wrap_struct_name
1289 );
1290 }
1291 }
1292 else {
1293 rb_bug("rb_gc_handle_weak_references: unknown T_DATA");
1294 }
1295 break;
1296
1297 case T_IMEMO: {
1298 switch (imemo_type(obj)) {
1299 case imemo_callcache: {
1300 struct rb_callcache *cc = (struct rb_callcache *)obj;
1301 if (cc->klass != Qundef &&
1302 (!rb_gc_handle_weak_references_alive_p(cc->klass) ||
1303 !rb_gc_handle_weak_references_alive_p((VALUE)cc->cme_))) {
1304 vm_cc_invalidate(cc);
1305 }
1306 break;
1307 }
1308 case imemo_subclasses: {
1309 struct rb_subclasses *subs = (struct rb_subclasses *)obj;
1310 VALUE *entries = rb_imemo_subclasses_entries(obj);
1311 for (uint32_t i = 0; i < subs->count; i++) {
1312 if (entries[i] && !rb_gc_handle_weak_references_alive_p(entries[i])) {
1313 entries[i] = 0;
1314 }
1315 }
1316 break;
1317 }
1318 default:
1319 rb_bug("rb_gc_handle_weak_references: unexpected imemo type");
1320 }
1321
1322 break;
1323 }
1324 default:
1325 rb_bug("rb_gc_handle_weak_references: type not supported\n");
1326 }
1327}
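
/* Sketch (illustrative, against this in-development API): a TypedData type
 * holding a weak reference registers the object with
 * rb_gc_declare_weak_references() and supplies a handle_weak_references
 * callback that clears entries whose targets are being swept, mirroring the
 * imemo_subclasses case above. struct weak_box is hypothetical:
 *
 *     struct weak_box { VALUE target; };
 *
 *     static void
 *     weak_box_handle_weak_references(void *ptr)
 *     {
 *         struct weak_box *box = ptr;
 *         if (box->target && !rb_gc_handle_weak_references_alive_p(box->target)) {
 *             box->target = 0;    // target is dead; drop the reference
 *         }
 *     }
 */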
1328
1329static inline bool
1330rb_gc_imemo_needs_cleanup_p(VALUE obj)
1331{
1332 switch (imemo_type(obj)) {
1333 case imemo_constcache:
1334 case imemo_cref:
1335 case imemo_ifunc:
1336 case imemo_memo:
1337 case imemo_svar:
1338 case imemo_callcache:
1339 case imemo_throw_data:
1340 case imemo_cvar_entry:
1341 return false;
1342
1343 case imemo_env:
1344 case imemo_ment:
1345 case imemo_iseq:
1346 case imemo_callinfo:
1347 return true;
1348
1349 case imemo_subclasses:
1350 return FL_TEST_RAW(obj, IMEMO_SUBCLASSES_HEAP);
1351
1352 case imemo_tmpbuf:
1353 return ((rb_imemo_tmpbuf_t *)obj)->ptr != NULL;
1354
1355 case imemo_fields:
1356 return FL_TEST_RAW(obj, OBJ_FIELD_HEAP) || (id2ref_tbl && rb_obj_shape_has_id(obj));
1357 }
1358 UNREACHABLE_RETURN(true);
1359}
1360
1361/*
1362 * Returns true if the object requires a full rb_gc_obj_free() call during sweep,
1363 * false if it can be freed quickly without calling destructors or cleanup.
1364 *
1365 * Objects that return false are:
1366 * - Simple embedded objects without external allocations
1367 * - Objects without finalizers
1368 * - Objects without object IDs registered in id2ref
1369 * - Objects without generic instance variables
1370 *
1371 * This is used by the GC sweep fast path to avoid function call overhead
1372 * for the majority of simple objects.
1373 */
1374bool
1375rb_gc_obj_needs_cleanup_p(VALUE obj)
1376{
1377 VALUE flags = RBASIC(obj)->flags;
1378
1379 if (flags & FL_FINALIZE) return true;
1380
1381 switch (flags & RUBY_T_MASK) {
1382 case T_IMEMO:
1383 return rb_gc_imemo_needs_cleanup_p(obj);
1384
1385 case T_DATA:
1386 case T_OBJECT:
1387 case T_STRING:
1388 case T_ARRAY:
1389 case T_HASH:
1390 case T_BIGNUM:
1391 case T_STRUCT:
1392 case T_FLOAT:
1393 case T_RATIONAL:
1394 case T_COMPLEX:
1395 break;
1396
1397 case T_FILE:
1398 case T_SYMBOL:
1399 case T_CLASS:
1400 case T_ICLASS:
1401 case T_MODULE:
1402 case T_REGEXP:
1403 case T_MATCH:
1404 return true;
1405 }
1406
1407 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1408 if (id2ref_tbl && rb_shape_has_object_id(shape_id)) return true;
1409
1410 switch (flags & RUBY_T_MASK) {
1411 case T_OBJECT:
1412 if (flags & ROBJECT_HEAP) return true;
1413 return false;
1414
1415 case T_DATA:
1416 if (flags & RUBY_TYPED_FL_IS_TYPED_DATA) {
1417 uintptr_t type = (uintptr_t)RTYPEDDATA(obj)->type;
1418 if (type & TYPED_DATA_EMBEDDED) {
1419 RUBY_DATA_FUNC dfree = ((const rb_data_type_t *)(type & TYPED_DATA_PTR_MASK))->function.dfree;
1420 if (dfree == RUBY_NEVER_FREE || dfree == RUBY_TYPED_DEFAULT_FREE) {
1421 return false;
1422 }
1423 }
1424 }
1425 return true;
1426
1427 case T_STRING:
1428 if (flags & (RSTRING_NOEMBED | RSTRING_FSTR)) return true;
1429 return rb_shape_has_fields(shape_id);
1430
1431 case T_ARRAY:
1432 if (!(flags & RARRAY_EMBED_FLAG)) return true;
1433 return rb_shape_has_fields(shape_id);
1434
1435 case T_HASH:
1436 if (flags & RHASH_ST_TABLE_FLAG) return true;
1437 return rb_shape_has_fields(shape_id);
1438
1439 case T_BIGNUM:
1440 if (!(flags & BIGNUM_EMBED_FLAG)) return true;
1441 return rb_shape_has_fields(shape_id);
1442
1443 case T_STRUCT:
1444 if (!(flags & RSTRUCT_EMBED_LEN_MASK)) return true;
1445 if (flags & RSTRUCT_GEN_FIELDS) return rb_shape_has_fields(shape_id);
1446 return false;
1447
1448 case T_FLOAT:
1449 case T_RATIONAL:
1450 case T_COMPLEX:
1451 return rb_shape_has_fields(shape_id);
1452
1453 default:
1454 UNREACHABLE_RETURN(true);
1455 }
1456}
1457
1458static void
1459io_fptr_finalize(void *fptr)
1460{
1461 rb_io_fptr_finalize((struct rb_io *)fptr);
1462}
1463
1464static inline void
1465make_io_zombie(void *objspace, VALUE obj)
1466{
1467 rb_io_t *fptr = RFILE(obj)->fptr;
1468 rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
1469}
1470
1471static bool
1472rb_data_free(void *objspace, VALUE obj)
1473{
1474 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1475 if (data) {
1476 int free_immediately = false;
1477 void (*dfree)(void *);
1478
1479 if (RTYPEDDATA_P(obj)) {
1480 free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1481 dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
1482 }
1483 else {
1484 dfree = RDATA(obj)->dfree;
1485 }
1486
1487 if (dfree) {
1488 if (dfree == RUBY_DEFAULT_FREE) {
1489 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1490 xfree(data);
1491 RB_DEBUG_COUNTER_INC(obj_data_xfree);
1492 }
1493 }
1494 else if (free_immediately) {
1495 (*dfree)(data);
1496 if (RTYPEDDATA_EMBEDDABLE_P(obj) && !RTYPEDDATA_EMBEDDED_P(obj)) {
1497 xfree(data);
1498 }
1499
1500 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1501 }
1502 else {
1503 rb_gc_impl_make_zombie(objspace, obj, dfree, data);
1504 RB_DEBUG_COUNTER_INC(obj_data_zombie);
1505 return FALSE;
1506 }
1507 }
1508 else {
1509 RB_DEBUG_COUNTER_INC(obj_data_empty);
1510 }
1511 }
1512
1513 return true;
1514}
1515
1516struct classext_foreach_args {
1517 VALUE klass;
1518 rb_objspace_t *objspace; // used for update_*
1519};
1520
1521static void
1522classext_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
1523{
1524 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1525
1526 rb_class_classext_free(args->klass, ext, is_prime);
1527}
1528
1529static void
1530classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
1531{
1532 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1533
1534 rb_iclass_classext_free(args->klass, ext, is_prime);
1535}
1536
1537bool
1538rb_gc_obj_free(void *objspace, VALUE obj)
1539{
1540 struct classext_foreach_args args;
1541
1542 RB_DEBUG_COUNTER_INC(obj_free);
1543
1544 switch (BUILTIN_TYPE(obj)) {
1545 case T_NIL:
1546 case T_FIXNUM:
1547 case T_TRUE:
1548 case T_FALSE:
1549 rb_bug("obj_free() called for broken object");
1550 break;
1551 default:
1552 break;
1553 }
1554
1555 switch (BUILTIN_TYPE(obj)) {
1556 case T_OBJECT:
1557 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
1558 if (rb_obj_shape_complex_p(obj)) {
1559 RB_DEBUG_COUNTER_INC(obj_obj_complex);
1560 st_free_table(ROBJECT_FIELDS_HASH(obj));
1561 }
1562 else {
1563 SIZED_FREE_N(ROBJECT(obj)->as.heap.fields, ROBJECT_FIELDS_CAPACITY(obj));
1564 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1565 }
1566 }
1567 else {
1568 RB_DEBUG_COUNTER_INC(obj_obj_embed);
1569 }
1570 break;
1571 case T_MODULE:
1572 case T_CLASS:
1573#if USE_ZJIT
1574 rb_zjit_klass_free(obj);
1575#endif
1576 args.klass = obj;
1577 rb_class_classext_foreach(obj, classext_free, (void *)&args);
1578 if (RCLASS_CLASSEXT_TBL(obj)) {
1579 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1580 }
1581 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1582 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1583 break;
1584 case T_STRING:
1585 rb_str_free(obj);
1586 break;
1587 case T_ARRAY:
1588 rb_ary_free(obj);
1589 break;
1590 case T_HASH:
1591#if USE_DEBUG_COUNTER
1592 switch (RHASH_SIZE(obj)) {
1593 case 0:
1594 RB_DEBUG_COUNTER_INC(obj_hash_empty);
1595 break;
1596 case 1:
1597 RB_DEBUG_COUNTER_INC(obj_hash_1);
1598 break;
1599 case 2:
1600 RB_DEBUG_COUNTER_INC(obj_hash_2);
1601 break;
1602 case 3:
1603 RB_DEBUG_COUNTER_INC(obj_hash_3);
1604 break;
1605 case 4:
1606 RB_DEBUG_COUNTER_INC(obj_hash_4);
1607 break;
1608 case 5:
1609 case 6:
1610 case 7:
1611 case 8:
1612 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1613 break;
1614 default:
1615 GC_ASSERT(RHASH_SIZE(obj) > 8);
1616 RB_DEBUG_COUNTER_INC(obj_hash_g8);
1617 }
1618
1619 if (RHASH_AR_TABLE_P(obj)) {
1620 if (RHASH_AR_TABLE(obj) == NULL) {
1621 RB_DEBUG_COUNTER_INC(obj_hash_null);
1622 }
1623 else {
1624 RB_DEBUG_COUNTER_INC(obj_hash_ar);
1625 }
1626 }
1627 else {
1628 RB_DEBUG_COUNTER_INC(obj_hash_st);
1629 }
1630#endif
1631
1632 rb_hash_free(obj);
1633 break;
1634 case T_REGEXP:
1635 if (RREGEXP(obj)->ptr) {
1636 onig_free(RREGEXP(obj)->ptr);
1637 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1638 }
1639 break;
1640 case T_DATA:
1641 if (!rb_data_free(objspace, obj)) return false;
1642 break;
1643 case T_MATCH:
1644 {
1645 struct RMatch *rm = RMATCH(obj);
1646#if USE_DEBUG_COUNTER
1647 if (rm->num_regs >= 8) {
1648 RB_DEBUG_COUNTER_INC(obj_match_ge8);
1649 }
1650 else if (rm->num_regs >= 4) {
1651 RB_DEBUG_COUNTER_INC(obj_match_ge4);
1652 }
1653 else if (rm->num_regs >= 1) {
1654 RB_DEBUG_COUNTER_INC(obj_match_under4);
1655 }
1656#endif
1657 if (FL_TEST_RAW(obj, RMATCH_ONIG)) {
1658 onig_region_free(&rm->as.onig, 0);
1659 }
1660 SIZED_FREE_N(rm->char_offset, rm->char_offset_num_allocated);
1661
1662 RB_DEBUG_COUNTER_INC(obj_match_ptr);
1663 }
1664 break;
1665 case T_FILE:
1666 if (RFILE(obj)->fptr) {
1667 make_io_zombie(objspace, obj);
1668 RB_DEBUG_COUNTER_INC(obj_file_ptr);
1669 return FALSE;
1670 }
1671 break;
1672 case T_RATIONAL:
1673 RB_DEBUG_COUNTER_INC(obj_rational);
1674 break;
1675 case T_COMPLEX:
1676 RB_DEBUG_COUNTER_INC(obj_complex);
1677 break;
1678 case T_MOVED:
1679 break;
1680 case T_ICLASS:
1681 args.klass = obj;
1682
1683 rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
1684 if (RCLASS_CLASSEXT_TBL(obj)) {
1685 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1686 }
1687
1688 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1689 break;
1690
1691 case T_FLOAT:
1692 RB_DEBUG_COUNTER_INC(obj_float);
1693 break;
1694
1695 case T_BIGNUM:
1696 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1697 SIZED_FREE_N(BIGNUM_DIGITS(obj), BIGNUM_LEN(obj));
1698 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1699 }
1700 else {
1701 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1702 }
1703 break;
1704
1705 case T_NODE:
1706 UNEXPECTED_NODE(obj_free);
1707 break;
1708
1709 case T_STRUCT:
1710 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1711 RSTRUCT(obj)->as.heap.ptr == NULL) {
1712 RB_DEBUG_COUNTER_INC(obj_struct_embed);
1713 }
1714 else {
1715 SIZED_FREE_N(RSTRUCT(obj)->as.heap.ptr, RSTRUCT(obj)->as.heap.len);
1716 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1717 }
1718 break;
1719
1720 case T_SYMBOL:
1721 RB_DEBUG_COUNTER_INC(obj_symbol);
1722 break;
1723
1724 case T_IMEMO:
1725 rb_imemo_free((VALUE)obj);
1726 break;
1727
1728 default:
1729 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1730 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1731 }
1732
1733 if (FL_TEST_RAW(obj, FL_FINALIZE)) {
1734 rb_gc_impl_make_zombie(objspace, obj, 0, 0);
1735 return FALSE;
1736 }
1737 else {
1738 return TRUE;
1739 }
1740}
1741
1742void
1743rb_objspace_set_event_hook(const rb_event_flag_t event)
1744{
1745 rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1746}
1747
1748static int
1749internal_object_p(VALUE obj)
1750{
1751 void *ptr = asan_unpoison_object_temporary(obj);
1752
1753 if (RBASIC(obj)->flags) {
1754 switch (BUILTIN_TYPE(obj)) {
1755 case T_NODE:
1756 UNEXPECTED_NODE(internal_object_p);
1757 break;
1758 case T_NONE:
1759 case T_MOVED:
1760 case T_IMEMO:
1761 case T_ICLASS:
1762 case T_ZOMBIE:
1763 break;
1764 case T_CLASS:
1765 if (obj == rb_mRubyVMFrozenCore)
1766 return 1;
1767
1768 if (!RBASIC_CLASS(obj)) break;
1769 if (RCLASS_SINGLETON_P(obj)) {
1770 return rb_singleton_class_internal_p(obj);
1771 }
1772 return 0;
1773 default:
1774 if (!RBASIC(obj)->klass) break;
1775 return 0;
1776 }
1777 }
1778 if (ptr || !RBASIC(obj)->flags) {
1779 rb_asan_poison_object(obj);
1780 }
1781 return 1;
1782}
1783
1784int
1785rb_objspace_internal_object_p(VALUE obj)
1786{
1787 return internal_object_p(obj);
1788}
1789
1790struct os_each_struct {
1791 size_t num;
1792 VALUE of;
1793};
1794
1795static int
1796os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1797{
1798 struct os_each_struct *oes = (struct os_each_struct *)data;
1799
1800 VALUE v = (VALUE)vstart;
1801 for (; v != (VALUE)vend; v += stride) {
1802 if (!internal_object_p(v)) {
1803 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1804 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1805 rb_yield(v);
1806 oes->num++;
1807 }
1808 }
1809 }
1810 }
1811
1812 return 0;
1813}
1814
1815static VALUE
1816os_obj_of(VALUE of)
1817{
1818 struct os_each_struct oes;
1819
1820 oes.num = 0;
1821 oes.of = of;
1822 rb_objspace_each_objects(os_obj_of_i, &oes);
1823 return SIZET2NUM(oes.num);
1824}
1825
1826/*
1827 * call-seq:
1828 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1829 * ObjectSpace.each_object([module]) -> an_enumerator
1830 *
1831 * Calls the block once for each living, nonimmediate object in this
1832 * Ruby process. If <i>module</i> is specified, calls the block
1833 * for only those classes or modules that match (or are a subclass of)
1834 * <i>module</i>. Returns the number of objects found. Immediate
1835 * objects (such as <code>Fixnum</code>s, static <code>Symbol</code>s,
1836 * <code>true</code>, <code>false</code> and <code>nil</code>) are
1837 * never returned.
1838 *
1839 * If no block is given, an enumerator is returned instead.
1840 *
1841 * Job = Class.new
1842 * jobs = [Job.new, Job.new]
1843 * count = ObjectSpace.each_object(Job) {|x| p x }
1844 * puts "Total count: #{count}"
1845 *
1846 * <em>produces:</em>
1847 *
1848 * #<Job:0x000000011d6cbbf0>
1849 * #<Job:0x000000011d6cbc68>
1850 * Total count: 2
1851 *
1852 * Due to a current Ractor implementation issue, this method does not yield
1853 * Ractor-unshareable objects when the process is in multi-Ractor mode. Multi-Ractor
1854 * mode is enabled when <code>Ractor.new</code> has been called for the first time.
1855 * See https://bugs.ruby-lang.org/issues/19387 for more information.
1856 *
1857 * a = 12345678987654321 # shareable
1858 * b = [].freeze # shareable
1859 * c = {} # not shareable
1860 * ObjectSpace.each_object {|x| x } # yields a, b, and c
1861 * Ractor.new {} # enter multi-Ractor mode
1862 * ObjectSpace.each_object {|x| x } # does not yield c
1863 *
1864 */
1865
1866static VALUE
1867os_each_obj(int argc, VALUE *argv, VALUE os)
1868{
1869 VALUE of;
1870
1871 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1872 RETURN_ENUMERATOR(os, 1, &of);
1873 return os_obj_of(of);
1874}
1875
1876/*
1877 * call-seq:
1878 * ObjectSpace.undefine_finalizer(obj)
1879 *
1880 * Removes all finalizers for <i>obj</i>.
1881 *
1882 */
1883
1884static VALUE
1885undefine_final(VALUE os, VALUE obj)
1886{
1887 return rb_undefine_finalizer(obj);
1888}
1889
1890VALUE
1891rb_undefine_finalizer(VALUE obj)
1892{
1893 rb_check_frozen(obj);
1894
1895 rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1896
1897 return obj;
1898}
1899
1900static void
1901should_be_callable(VALUE block)
1902{
1903 if (!rb_obj_respond_to(block, idCall, TRUE)) {
1904 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1905 rb_obj_class(block));
1906 }
1907}
1908
1909static void
1910should_be_finalizable(VALUE obj)
1911{
1912 if (!FL_ABLE(obj)) {
1913 rb_raise(rb_eArgError, "cannot define finalizer for %s",
1914 rb_obj_classname(obj));
1915 }
1916 rb_check_frozen(obj);
1917}
1918
1919void
1920rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1921{
1922 rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1923}
1924
1925/*
1926 * call-seq:
1927 * ObjectSpace.define_finalizer(obj) {|id| ... } -> array
1928 * ObjectSpace.define_finalizer(obj, finalizer) -> array
1929 *
1930 * Adds a new finalizer for +obj+ that is called when +obj+ is destroyed
1931 * by the garbage collector or when Ruby shuts down (whichever comes first).
1932 *
1933 * With a block given, uses the block as the callback. Without a block given,
1934 * uses a callable object +finalizer+ as the callback. The callback is called
1935 * when +obj+ is destroyed with a single argument +id+ which is the object
1936 * ID of +obj+ (see Object#object_id).
1937 *
1938 * The return value is an array <code>[0, callback]</code>, where +callback+
1939 * is a Proc created from the block if one was given or +finalizer+ otherwise.
1940 *
1941 * Note that defining a finalizer in an instance method of the object may prevent
1942 * the object from ever being garbage collected: if the block or +finalizer+ refers
1943 * to +obj+, then +obj+ will never be reclaimed by the garbage collector. For example,
1944 * the following script demonstrates the issue:
1945 *
1946 * class Foo
1947 * def define_final
1948 * ObjectSpace.define_finalizer(self) do |id|
1949 * puts "Running finalizer for #{id}!"
1950 * end
1951 * end
1952 * end
1953 *
1954 * obj = Foo.new
1955 * obj.define_final
1956 *
1957 * There are two patterns to solve this issue:
1958 *
1959 * - Create the finalizer in a non-instance method so it can safely capture
1960 * the needed state:
1961 *
1962 * class Foo
1963 * def define_final
1964 * ObjectSpace.define_finalizer(self, self.class.create_finalizer)
1965 * end
1966 *
1967 * def self.create_finalizer
1968 * proc do |id|
1969 * puts "Running finalizer for #{id}!"
1970 * end
1971 * end
1972 * end
1973 *
1974 * - Use a callable object:
1975 *
1976 * class Foo
1977 * class Finalizer
1978 * def call(id)
1979 * puts "Running finalizer for #{id}!"
1980 * end
1981 * end
1982 *
1983 * def define_final
1984 * ObjectSpace.define_finalizer(self, Finalizer.new)
1985 * end
1986 * end
1987 *
1988 * Note that finalization can be unpredictable and is never guaranteed
1989 * to be run except on exit.
1990 */
1991
1992static VALUE
1993define_final(int argc, VALUE *argv, VALUE os)
1994{
1995 VALUE obj, block;
1996
1997 rb_scan_args(argc, argv, "11", &obj, &block);
1998 if (argc == 1) {
1999 block = rb_block_proc();
2000 }
2001
2002 if (rb_callable_receiver(block) == obj) {
2003 rb_warn("finalizer references object to be finalized");
2004 }
2005
2006 return rb_define_finalizer(obj, block);
2007}
2008
2009VALUE
2010rb_define_finalizer(VALUE obj, VALUE block)
2011{
2012 should_be_finalizable(obj);
2013 should_be_callable(block);
2014
2015 block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
2016
2017 block = rb_ary_new3(2, INT2FIX(0), block);
2018 OBJ_FREEZE(block);
2019 return block;
2020}
2021
2022void
2023rb_objspace_call_finalizer(void)
2024{
2025 rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
2026}
2027
2028void
2029rb_objspace_free_objects(void *objspace)
2030{
2031 rb_gc_impl_shutdown_free_objects(objspace);
2032}
2033
2034int
2035rb_objspace_garbage_object_p(VALUE obj)
2036{
2037 return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
2038}
2039
2040bool
2041rb_gc_pointer_to_heap_p(VALUE obj)
2042{
2043 return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
2044}
2045
2046#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
2047#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
2048static VALUE id2ref_value = 0;
2049
2050#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
2051static size_t object_id_counter = 1;
2052#else
2053static unsigned long long object_id_counter = 1;
2054#endif
2055
2056static inline VALUE
2057generate_next_object_id(void)
2058{
2059#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
2060 // 64bit atomics are available
2061 return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
2062#else
2063 unsigned int lock_lev = RB_GC_VM_LOCK();
2064 VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
2065 RB_GC_VM_UNLOCK(lock_lev);
2066 return id;
2067#endif
2068}
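
/*
 * Worked illustration (typical 64-bit build): RUBY_IMMEDIATE_MASK is 0x07,
 * so OBJ_ID_INCREMENT is 8 and the IDs handed out above are 8, 16, 24, ...
 * Immediates use their tagged VALUE itself as the ID (see rb_find_object_id
 * below): Fixnums are odd, flonums are 2 mod 4, and static symbols, nil,
 * true, and false are 0 or 4 mod 8, so no immediate is ever a non-zero
 * multiple of 8 and the two ID spaces cannot collide.
 */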
2069
2070void
2071rb_gc_obj_id_moved(VALUE obj)
2072{
2073 if (UNLIKELY(id2ref_tbl)) {
2074 st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
2075 }
2076}
2077
2078static int
2079object_id_cmp(st_data_t x, st_data_t y)
2080{
2081 if (RB_TYPE_P(x, T_BIGNUM)) {
2082 return !rb_big_eql(x, y);
2083 }
2084 else {
2085 return x != y;
2086 }
2087}
2088
2089static st_index_t
2090object_id_hash(st_data_t n)
2091{
2092 return FIX2LONG(rb_hash((VALUE)n));
2093}
2094
2095static const struct st_hash_type object_id_hash_type = {
2096 object_id_cmp,
2097 object_id_hash,
2098};
2099
2100static void gc_mark_tbl_no_pin(st_table *table);
2101
2102static void
2103id2ref_tbl_mark(void *data)
2104{
2105 st_table *table = (st_table *)data;
2106 if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
2107 // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
2108 rb_mark_set(table);
2109 }
2110 // We purposely don't mark values, as they are weak references.
2111 // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
2112}
2113
2114static size_t
2115id2ref_tbl_memsize(const void *data)
2116{
2117 return rb_st_memsize(data);
2118}
2119
2120static void
2121id2ref_tbl_free(void *data)
2122{
2123 id2ref_tbl = NULL; // clear global ref
2124 st_table *table = (st_table *)data;
2125 st_free_table(table);
2126}
2127
2128static const rb_data_type_t id2ref_tbl_type = {
2129 .wrap_struct_name = "VM/_id2ref_table",
2130 .function = {
2131 .dmark = id2ref_tbl_mark,
2132 .dfree = id2ref_tbl_free,
2133 .dsize = id2ref_tbl_memsize,
2134 // dcompact function not required because the table is reference updated
2135 // in rb_gc_vm_weak_table_foreach
2136 },
2137 .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
2138};
2139
2140static VALUE
2141class_object_id(VALUE klass)
2142{
2143 VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
2144 if (!id) {
2145 unsigned int lock_lev = RB_GC_VM_LOCK();
2146 id = generate_next_object_id();
2147 VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
2148 if (existing_id) {
2149 id = existing_id;
2150 }
2151 else if (RB_UNLIKELY(id2ref_tbl)) {
2152 st_insert(id2ref_tbl, id, klass);
2153 }
2154 RB_GC_VM_UNLOCK(lock_lev);
2155 }
2156 return id;
2157}
2158
2159static inline VALUE
2160object_id_get(VALUE obj, shape_id_t shape_id)
2161{
2162 VALUE id;
2163 if (rb_shape_complex_p(shape_id)) {
2164 id = rb_obj_field_get(obj, ROOT_COMPLEX_WITH_OBJ_ID);
2165 }
2166 else {
2167 id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
2168 }
2169
2170#if RUBY_DEBUG
2171 if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
2172 rb_p(obj);
2173 rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
2174 }
2175#endif
2176
2177 return id;
2178}
2179
2180static VALUE
2181object_id0(VALUE obj)
2182{
2183 VALUE id = Qfalse;
2184 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2185
2186 if (rb_shape_has_object_id(shape_id)) {
2187 return object_id_get(obj, shape_id);
2188 }
2189
2190 shape_id_t object_id_shape_id = rb_obj_shape_transition_object_id(obj);
2191
2192 id = generate_next_object_id();
2193 rb_obj_field_set(obj, object_id_shape_id, 0, id);
2194
2195 RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
2196 RUBY_ASSERT(rb_obj_shape_has_id(obj));
2197
2198 if (RB_UNLIKELY(id2ref_tbl)) {
2199 RB_VM_LOCKING() {
2200 st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
2201 }
2202 }
2203 return id;
2204}
2205
2206static VALUE
2207object_id(VALUE obj)
2208{
2209 switch (BUILTIN_TYPE(obj)) {
2210 case T_CLASS:
2211 case T_MODULE:
2212 // With Ruby Box, classes and modules have different fields
2213 // in different boxes, so we cannot store the object id
2214 // in fields.
2215 return class_object_id(obj);
2216 case T_IMEMO:
2217 RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
2218 break;
2219 default:
2220 break;
2221 }
2222
2223 if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
2224 unsigned int lock_lev = RB_GC_VM_LOCK();
2225 VALUE id = object_id0(obj);
2226 RB_GC_VM_UNLOCK(lock_lev);
2227 return id;
2228 }
2229
2230 return object_id0(obj);
2231}
2232
2233static void
2234build_id2ref_i(VALUE obj, void *data)
2235{
2236 st_table *id2ref_tbl = (st_table *)data;
2237
2238 switch (BUILTIN_TYPE(obj)) {
2239 case T_CLASS:
2240 case T_MODULE:
2241 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2242 if (RCLASS(obj)->object_id) {
2243 st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
2244 }
2245 break;
2246 case T_IMEMO:
2247 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2248 if (IMEMO_TYPE_P(obj, imemo_fields) && rb_obj_shape_has_id(obj)) {
2249 st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
2250 }
2251 break;
2252 case T_OBJECT:
2253 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2254 if (rb_obj_shape_has_id(obj)) {
2255 st_insert(id2ref_tbl, rb_obj_id(obj), obj);
2256 }
2257 break;
2258 default:
2259 // For generic_fields, the T_IMEMO/fields is responsible for populating the entry.
2260 break;
2261 }
2262}
2263
2264static VALUE
2265object_id_to_ref(void *objspace_ptr, VALUE object_id)
2266{
2267 rb_objspace_t *objspace = objspace_ptr;
2268
2269 unsigned int lev = RB_GC_VM_LOCK();
2270
2271 if (!id2ref_tbl) {
2272 rb_gc_vm_barrier(); // stop other ractors
2273
2274        // GC must not trigger while we build the table, otherwise if we end
2275 // up freeing an object that had an ID, we might try to delete it from
2276 // the table even though it wasn't inserted yet.
2277 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
2278 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
2279
2280 // build_id2ref_i will most certainly malloc, which could trigger GC and sweep
2281 // objects we just added to the table.
2282        // By calling rb_gc_disable() we also avoid having to handle potentially garbage objects.
2283 bool gc_disabled = RTEST(rb_gc_disable());
2284 {
2285 id2ref_tbl = tmp_id2ref_tbl;
2286 id2ref_value = tmp_id2ref_value;
2287
2288 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
2289 }
2290 if (!gc_disabled) rb_gc_enable();
2291 }
2292
2293 VALUE obj;
2294 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2295
2296 RB_GC_VM_UNLOCK(lev);
2297
2298 if (found) {
2299 return obj;
2300 }
2301
2302 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2303 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2304 }
2305 else {
2306 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2307 }
2308}
2309
2310static inline void
2311obj_free_object_id(VALUE obj)
2312{
2313 VALUE obj_id = 0;
2314 if (RB_UNLIKELY(id2ref_tbl)) {
2315 switch (BUILTIN_TYPE(obj)) {
2316 case T_CLASS:
2317 case T_MODULE:
2318 obj_id = RCLASS(obj)->object_id;
2319 break;
2320 case T_IMEMO:
2321 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2322 return;
2323 }
2324 // fallthrough
2325 case T_OBJECT:
2326 {
2327 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2328 if (rb_shape_has_object_id(shape_id)) {
2329 obj_id = object_id_get(obj, shape_id);
2330 }
2331 break;
2332 }
2333 default:
2334 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2335 return;
2336 }
2337
2338 if (RB_UNLIKELY(obj_id)) {
2339 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2340
2341 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2342                // If the object is a T_IMEMO/fields, then it's possible the actual object
2343 // has been garbage collected already.
2344 if (!RB_TYPE_P(obj, T_IMEMO)) {
2345 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2346 }
2347 }
2348 }
2349 }
2350}
2351
2352void
2353rb_gc_obj_free_vm_weak_references(VALUE obj)
2354{
2356 obj_free_object_id(obj);
2357
2358    if (rb_obj_gen_fields_p(obj)) {
2359        rb_free_generic_ivar(obj);
2360    }
2361
2362 switch (BUILTIN_TYPE(obj)) {
2363 case T_STRING:
2364 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2365 rb_gc_free_fstring(obj);
2366 }
2367 break;
2368 case T_SYMBOL:
2369 rb_gc_free_dsymbol(obj);
2370 break;
2371 case T_IMEMO:
2372 switch (imemo_type(obj)) {
2373 case imemo_callinfo:
2374 rb_vm_ci_free((const struct rb_callinfo *)obj);
2375 break;
2376 case imemo_ment:
2377 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2378 break;
2379 default:
2380 break;
2381 }
2382 break;
2383 default:
2384 break;
2385 }
2386}
2387
2388/*
2389 * call-seq:
2390 * ObjectSpace._id2ref(object_id) -> an_object
2391 *
2392 * Converts an object id to a reference to the object. It must not be
2393 * called with an object id passed as a parameter to a finalizer.
2394 *
2395 * s = "I am a string" #=> "I am a string"
2396 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2397 * r == s #=> true
2398 *
2399 * In multi-Ractor mode, if the object is not shareable, it raises
2400 * RangeError.
2401 *
2402 * This method is deprecated and should no longer be used.
2403 */
2404
2405static VALUE
2406id2ref(VALUE objid)
2407{
2408 objid = rb_to_int(objid);
2409 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2410 VALUE ptr = (VALUE)NUM2PTR(objid);
2411 if (SPECIAL_CONST_P(ptr)) {
2412 if (ptr == Qtrue) return Qtrue;
2413 if (ptr == Qfalse) return Qfalse;
2414 if (NIL_P(ptr)) return Qnil;
2415 if (FIXNUM_P(ptr)) return ptr;
2416 if (FLONUM_P(ptr)) return ptr;
2417
2418 if (SYMBOL_P(ptr)) {
2419 // Check that the symbol is valid
2420 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2421 return ptr;
2422 }
2423 else {
2424 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2425 }
2426 }
2427
2428 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2429 }
2430 }
2431
2432 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2433 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2434 return obj;
2435 }
2436 else {
2437 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2438 }
2439}
2440
2441/* :nodoc: */
2442static VALUE
2443os_id2ref(VALUE os, VALUE objid)
2444{
2445 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2446 return id2ref(objid);
2447}
2448
2449static VALUE
2450rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2451{
2452 if (SPECIAL_CONST_P(obj)) {
2453#if SIZEOF_LONG == SIZEOF_VOIDP
2454 return LONG2NUM((SIGNED_VALUE)obj);
2455#else
2456 return LL2NUM((SIGNED_VALUE)obj);
2457#endif
2458 }
2459
2460 return get_heap_object_id(obj);
2461}
2462
2463static VALUE
2464nonspecial_obj_id(VALUE obj)
2465{
2466#if SIZEOF_LONG == SIZEOF_VOIDP
2467 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2468#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2469 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2470#else
2471# error not supported
2472#endif
2473}
2474
2475VALUE
2476rb_memory_id(VALUE obj)
2477{
2478 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2479}
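
/*
 * Concrete example of the immediate path (64-bit CRuby): the Fixnum 42 is
 * encoded as the VALUE (42 << 1) | 1 == 85, and rb_find_object_id() returns
 * that VALUE itself, so 42.object_id == 85. For heap objects rb_memory_id()
 * instead tags the slot address via nonspecial_obj_id(), which is why two
 * live heap objects can never share a memory ID.
 */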
2480
2481/*
2482 * Document-method: __id__
2483 * Document-method: object_id
2484 *
2485 * call-seq:
2486 * obj.__id__ -> integer
2487 * obj.object_id -> integer
2488 *
2489 * Returns an integer identifier for +obj+.
2490 *
2491 * The same number will be returned on all calls to +object_id+ for a given
2492 * object, and no two active objects will share an id.
2493 *
2494 * Note that some objects of builtin classes are reused for optimization.
2495 * This is the case for immediate values and frozen string literals.
2496 *
2497 * BasicObject implements +__id__+, Kernel implements +object_id+.
2498 *
2499 * Immediate values are not passed by reference but are passed by value:
2500 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2501 *
2502 * Object.new.object_id == Object.new.object_id # => false
2503 * (21 * 2).object_id == (21 * 2).object_id # => true
2504 * "hello".object_id == "hello".object_id # => false
2505 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2506 */
2507
2508VALUE
2509rb_obj_id(VALUE obj)
2510{
2511 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2512 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2513 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2514 * any immediates. */
2515 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2516}
2517
2518bool
2519rb_obj_id_p(VALUE obj)
2520{
2521 return !RB_TYPE_P(obj, T_IMEMO) && rb_obj_shape_has_id(obj);
2522}
2523
2524/*
2525 * GC implementations should call this function before the GC phase that updates references
2526 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2527 * "W^X" policy and protect the code memory from being modified during execution. This function
2528 * makes the code memory writeable.
2529 */
2530void
2531rb_gc_before_updating_jit_code(void)
2532{
2533#if USE_YJIT
2534 rb_yjit_mark_all_writeable();
2535#endif
2536#if USE_ZJIT
2537 rb_zjit_mark_all_writable();
2538#endif
2539}
2540
2541/*
2542 * GC implementations should call this function after the GC phase that updates references
2543 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2544 * executable again.
2545 */
2546void
2547rb_gc_after_updating_jit_code(void)
2548{
2549#if USE_YJIT
2550 rb_yjit_mark_all_executable();
2551#endif
2552#if USE_ZJIT
2553 rb_zjit_mark_all_executable();
2554#endif
2555}
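
/*
 * Expected call pattern for a GC implementation around reference updating
 * (illustrative; update_references_in_jit_code() is a stand-in for the
 * implementation's own update phase):
 *
 *    rb_gc_before_updating_jit_code();  // code pages become writable
 *    update_references_in_jit_code();   // patch VALUEs embedded in machine code
 *    rb_gc_after_updating_jit_code();   // code pages become executable again
 */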
2556
2557static void
2558classext_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2559{
2560 size_t *size = (size_t *)arg;
2561 size_t s = 0;
2562
2563 if (RCLASSEXT_M_TBL(ext)) {
2564 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2565 }
2566 if (RCLASSEXT_CONST_TBL(ext)) {
2567 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2568 }
2569 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2570 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2571 }
2572 if (!prime) {
2573 s += sizeof(rb_classext_t);
2574 }
2575 *size += s;
2576}
2577
2578static void
2579classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2580{
2581 size_t *size = (size_t *)arg;
2582 size_t array_size;
2583 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2584 RUBY_ASSERT(prime);
2585 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2586 *size += array_size * sizeof(VALUE);
2587 }
2588}
2589
2590size_t
2591rb_obj_memsize_of(VALUE obj)
2592{
2593 size_t size = 0;
2594
2595 if (SPECIAL_CONST_P(obj)) {
2596 return 0;
2597 }
2598
2599 switch (BUILTIN_TYPE(obj)) {
2600 case T_OBJECT:
2601 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2602 if (rb_obj_shape_complex_p(obj)) {
2603 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2604 }
2605 else {
2606 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2607 }
2608 }
2609 break;
2610 case T_MODULE:
2611 case T_CLASS:
2612 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2613 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2614 break;
2615 case T_ICLASS:
2616 if (RICLASS_OWNS_M_TBL_P(obj)) {
2617 if (RCLASS_M_TBL(obj)) {
2618 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2619 }
2620 }
2621 break;
2622 case T_STRING:
2623 size += rb_str_memsize(obj);
2624 break;
2625 case T_ARRAY:
2626 size += rb_ary_memsize(obj);
2627 break;
2628 case T_HASH:
2629 if (RHASH_ST_TABLE_P(obj)) {
2630 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2631 /* st_table is in the slot */
2632 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2633 }
2634 break;
2635 case T_REGEXP:
2636 if (RREGEXP_PTR(obj)) {
2637 size += onig_memsize(RREGEXP_PTR(obj));
2638 }
2639 break;
2640 case T_DATA:
2641 size += rb_objspace_data_type_memsize(obj);
2642 break;
2643 case T_MATCH:
2644 {
2645 struct RMatch *rm = RMATCH(obj);
2646 if (FL_TEST_RAW(obj, RMATCH_ONIG)) {
2647 size += onig_region_memsize(&rm->as.onig);
2648 }
2649 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2650 }
2651 break;
2652 case T_FILE:
2653 if (RFILE(obj)->fptr) {
2654 size += rb_io_memsize(RFILE(obj)->fptr);
2655 }
2656 break;
2657 case T_RATIONAL:
2658 case T_COMPLEX:
2659 break;
2660 case T_IMEMO:
2661 size += rb_imemo_memsize(obj);
2662 break;
2663
2664 case T_FLOAT:
2665 case T_SYMBOL:
2666 break;
2667
2668 case T_BIGNUM:
2669 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2670 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2671 }
2672 break;
2673
2674 case T_NODE:
2675 UNEXPECTED_NODE(obj_memsize_of);
2676 break;
2677
2678 case T_STRUCT:
2679 if (RSTRUCT_EMBED_LEN(obj) == 0) {
2680 size += sizeof(VALUE) * RSTRUCT_LEN_RAW(obj);
2681 }
2682 break;
2683
2684 case T_ZOMBIE:
2685 case T_MOVED:
2686 break;
2687
2688 default:
2689 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2690 BUILTIN_TYPE(obj), (void*)obj);
2691 }
2692
2693 return size + rb_gc_obj_slot_size(obj);
2694}
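
/*
 * Note that the result is the slot size plus auxiliary malloc'ed memory, not
 * a deep size: referenced VALUEs are not followed. A rough sketch of the
 * consequence (sizes are illustrative):
 *
 *    VALUE short_str = rb_str_new_cstr("abc");     // embedded: slot size only
 *    VALUE long_str  = rb_str_new(NULL, 1 << 16);  // slot size + 64 KiB buffer
 *    // rb_obj_memsize_of(long_str) > rb_obj_memsize_of(short_str)
 */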
2695
2696static int
2697set_zero(st_data_t key, st_data_t val, st_data_t arg)
2698{
2699 VALUE k = (VALUE)key;
2700 VALUE hash = (VALUE)arg;
2701 rb_hash_aset(hash, k, INT2FIX(0));
2702 return ST_CONTINUE;
2703}
2704
2705struct count_objects_data {
2706    size_t counts[T_MASK+1];
2707 size_t freed;
2708 size_t total;
2709};
2710
2711static void
2712count_objects_i(VALUE obj, void *d)
2713{
2714 struct count_objects_data *data = (struct count_objects_data *)d;
2715
2716 if (RBASIC(obj)->flags) {
2717 data->counts[BUILTIN_TYPE(obj)]++;
2718 }
2719 else {
2720 data->freed++;
2721 }
2722
2723 data->total++;
2724}
2725
2726/*
2727 * call-seq:
2728 * ObjectSpace.count_objects(result_hash = {}) -> hash
2729 *
2730 * Counts the number of objects, grouped by type.
2731 *
2732 * It returns a hash that looks like:
2733 *
2734 * {
2735 * TOTAL: 10000,
2736 * FREE: 3011,
2737 * T_OBJECT: 6,
2738 * T_CLASS: 404,
2739 * # ...
2740 * }
2741 *
2742 * The contents of the returned hash are implementation specific and
2743 * may be changed in future versions without notice.
2744 *
2745 * The keys starting with +:T_+ are live objects of a particular type.
2746 * For example, +:T_ARRAY+ is the number of arrays.
2747 *
2748 * The key +:FREE+ is the number of object slots which are empty.
2749 *
2750 * The key +:TOTAL+ is the total number of slots (which is the sum of
2751 * all of the other values).
2752 *
2753 * If the optional argument +result_hash+ is given,
2754 * it is overwritten and returned.
2755 * This is intended to avoid the probe effect.
2756 *
2757 * h = {}
2758 * ObjectSpace.count_objects(h)
2759 * puts h
2760 * # => { TOTAL: 10000, T_CLASS: 158280, T_MODULE: 20672, T_STRING: 527249 }
2761 *
2762 * This method is only expected to work on C Ruby.
2763 *
2764 */
2765
2766static VALUE
2767count_objects(int argc, VALUE *argv, VALUE os)
2768{
2769 struct count_objects_data data = { 0 };
2770 VALUE hash = Qnil;
2771 VALUE types[T_MASK + 1];
2772
2773 if (rb_check_arity(argc, 0, 1) == 1) {
2774 hash = argv[0];
2775 if (!RB_TYPE_P(hash, T_HASH))
2776 rb_raise(rb_eTypeError, "non-hash given");
2777 }
2778
2779 for (size_t i = 0; i <= T_MASK; i++) {
2780 // type_sym can allocate an object,
2781 // so we need to create all key symbols in advance
2782 // not to disturb the result
2783 types[i] = type_sym(i);
2784 }
2785
2786 // Same as type_sym, we need to create all key symbols in advance
2787 VALUE total = ID2SYM(rb_intern("TOTAL"));
2788 VALUE free = ID2SYM(rb_intern("FREE"));
2789
2790 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2791
2792 if (NIL_P(hash)) {
2793 hash = rb_hash_new();
2794 }
2795 else if (!RHASH_EMPTY_P(hash)) {
2796 rb_hash_stlike_foreach(hash, set_zero, hash);
2797 }
2798 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2799 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2800
2801 for (size_t i = 0; i <= T_MASK; i++) {
2802 if (data.counts[i]) {
2803 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2804 }
2805 }
2806
2807 return hash;
2808}
2809
2810#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2811
2812#define STACK_START (ec->machine.stack_start)
2813#define STACK_END (ec->machine.stack_end)
2814#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2815
2816#if STACK_GROW_DIRECTION < 0
2817# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2818#elif STACK_GROW_DIRECTION > 0
2819# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2820#else
2821# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2822 : (size_t)(STACK_END - STACK_START + 1))
2823#endif
2824#if !STACK_GROW_DIRECTION
2825int ruby_stack_grow_direction;
2826int
2827ruby_get_stack_grow_direction(volatile VALUE *addr)
2828{
2829 VALUE *end;
2830 SET_MACHINE_STACK_END(&end);
2831
2832 if (end > addr) return ruby_stack_grow_direction = 1;
2833 return ruby_stack_grow_direction = -1;
2834}
2835#endif
2836
2837size_t
2838ruby_stack_length(VALUE **p)
2839{
2840 rb_execution_context_t *ec = GET_EC();
2841 SET_STACK_END;
2842 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2843 return STACK_LENGTH;
2844}
2845
2846#define PREVENT_STACK_OVERFLOW 1
2847#ifndef PREVENT_STACK_OVERFLOW
2848#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2849# define PREVENT_STACK_OVERFLOW 1
2850#else
2851# define PREVENT_STACK_OVERFLOW 0
2852#endif
2853#endif
2854#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2855static int
2856stack_check(rb_execution_context_t *ec, int water_mark)
2857{
2858 SET_STACK_END;
2859
2860 size_t length = STACK_LENGTH;
2861 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2862
2863 return length > maximum_length;
2864}
2865#else
2866#define stack_check(ec, water_mark) FALSE
2867#endif
2868
2869#define STACKFRAME_FOR_CALL_CFUNC 2048
2870
2871int
2872rb_ec_stack_check(rb_execution_context_t *ec)
2873{
2874 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2875}
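
/*
 * Illustration: on a 64-bit build with an 8 MiB machine stack,
 * STACK_LEVEL_MAX is 8 MiB / sizeof(VALUE) == 1,048,576 slots, so with
 * water_mark == STACKFRAME_FOR_CALL_CFUNC (2048) the check reports overflow
 * once fewer than 2048 VALUE slots of headroom remain.
 */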
2876
2877int
2878ruby_stack_check(void)
2879{
2880 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2881}
2882
2883/* ==================== Marking ==================== */
2884
2885#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2886 if (!RB_SPECIAL_CONST_P(obj)) { \
2887 rb_vm_t *vm = GET_VM(); \
2888 void *objspace = vm->gc.objspace; \
2889 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2890 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2891 (func)(objspace, (obj_or_ptr)); \
2892 } \
2893 else if (check_obj ? \
2894 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2895 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2896 true) { \
2897 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2898 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2899 vm->gc.mark_func_data = NULL; \
2900 mark_func_data->mark_func((obj), mark_func_data->data); \
2901 vm->gc.mark_func_data = mark_func_data; \
2902 } \
2903 } \
2904} while (0)
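
/*
 * The mark_func_data branch above is what lets reachability APIs such as
 * rb_objspace_reachable_objects_from() reuse this marking code outside of an
 * actual GC. A sketch of that pattern, with a hypothetical callback:
 *
 *    static void
 *    print_child(VALUE child, void *data)
 *    {
 *        fprintf(stderr, "-> %s\n", rb_obj_info(child));
 *    }
 *
 *    // struct gc_mark_func_data_struct cb = { .mark_func = print_child };
 *    // vm->gc.mark_func_data = &cb;
 *    // rb_gc_mark_children(objspace, obj);  // invokes print_child per child
 *    // vm->gc.mark_func_data = NULL;
 */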
2905
2906static inline void
2907gc_mark_internal(VALUE obj)
2908{
2909 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2910}
2911
2912void
2913rb_gc_mark_movable(VALUE obj)
2914{
2915 gc_mark_internal(obj);
2916}
2917
2918void
2919rb_gc_mark_and_move(VALUE *ptr)
2920{
2921 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2922}
2923
2924static inline void
2925gc_mark_and_pin_internal(VALUE obj)
2926{
2927 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2928}
2929
2930void
2931rb_gc_mark(VALUE obj)
2932{
2933 gc_mark_and_pin_internal(obj);
2934}
2935
2936static inline void
2937gc_mark_maybe_internal(VALUE obj)
2938{
2939 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2940}
2941
2942void
2943rb_gc_mark_maybe(VALUE obj)
2944{
2945 gc_mark_maybe_internal(obj);
2946}
2947
2948ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2949static void
2950each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2951{
2952 VALUE v;
2953 while (n--) {
2954 v = *x;
2955 cb(v, data);
2956 x++;
2957 }
2958}
2959
2960static void
2961each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2962{
2963 if (end <= start) return;
2964 each_location(start, end - start, cb, data);
2965}
2966
2967static void
2968gc_mark_maybe_each_location(VALUE obj, void *data)
2969{
2970 gc_mark_maybe_internal(obj);
2971}
2972
2973void
2974rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2975{
2976 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2977}
2978
2979void
2980rb_gc_mark_values(long n, const VALUE *values)
2981{
2982 for (long i = 0; i < n; i++) {
2983 gc_mark_internal(values[i]);
2984 }
2985}
2986
2987void
2988rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2989{
2990 for (long i = 0; i < n; i++) {
2991 gc_mark_and_pin_internal(values[i]);
2992 }
2993}
2994
2995static int
2996mark_key(st_data_t key, st_data_t value, st_data_t data)
2997{
2998 gc_mark_and_pin_internal((VALUE)key);
2999
3000 return ST_CONTINUE;
3001}
3002
3003void
3004rb_mark_set(st_table *tbl)
3005{
3006 if (!tbl) return;
3007
3008 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
3009}
3010
3011static int
3012mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
3013{
3014 gc_mark_internal((VALUE)key);
3015 gc_mark_internal((VALUE)value);
3016
3017 return ST_CONTINUE;
3018}
3019
3020static int
3021pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
3022{
3023 gc_mark_and_pin_internal((VALUE)key);
3024 gc_mark_and_pin_internal((VALUE)value);
3025
3026 return ST_CONTINUE;
3027}
3028
3029static int
3030pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
3031{
3032 gc_mark_and_pin_internal((VALUE)key);
3033 gc_mark_internal((VALUE)value);
3034
3035 return ST_CONTINUE;
3036}
3037
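/*
 * Note on the split in mark_hash() below: a compare_by_identity hash keys on
 * object addresses, so letting a key move during compaction would leave its
 * entry in the wrong st_table bin; those keys are pinned while their values
 * stay movable. Content-keyed hashes let both keys and values move.
 */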
3038static void
3039mark_hash(VALUE hash)
3040{
3041 if (rb_hash_compare_by_id_p(hash)) {
3042 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
3043 }
3044 else {
3045 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
3046 }
3047
3048 gc_mark_internal(RHASH(hash)->ifnone);
3049}
3050
3051void
3052rb_mark_hash(st_table *tbl)
3053{
3054 if (!tbl) return;
3055
3056 st_foreach(tbl, pin_key_pin_value, 0);
3057}
3058
3059static enum rb_id_table_iterator_result
3060mark_method_entry_i(VALUE me, void *objspace)
3061{
3062 gc_mark_internal(me);
3063
3064 return ID_TABLE_CONTINUE;
3065}
3066
3067static void
3068mark_m_tbl(void *objspace, struct rb_id_table *tbl)
3069{
3070 if (tbl) {
3071 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
3072 }
3073}
3074
3075static enum rb_id_table_iterator_result
3076mark_const_entry_i(VALUE value, void *objspace)
3077{
3078 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
3079
3080 if (!rb_gc_checking_shareable()) {
3081 gc_mark_internal(ce->value);
3082 gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
3083 }
3084 return ID_TABLE_CONTINUE;
3085}
3086
3087static void
3088mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
3089{
3090 if (!tbl) return;
3091 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
3092}
3093
3094#if STACK_GROW_DIRECTION < 0
3095#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
3096#elif STACK_GROW_DIRECTION > 0
3097#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
3098#else
3099#define GET_STACK_BOUNDS(start, end, appendix) \
3100 ((STACK_END < STACK_START) ? \
3101 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
3102#endif
3103
3104static void
3105gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
3106{
3107 gc_mark_maybe_internal(obj);
3108
3109#ifdef RUBY_ASAN_ENABLED
3110 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
3111 void *fake_frame_start;
3112 void *fake_frame_end;
3113 bool is_fake_frame = asan_get_fake_stack_extents(
3114 ec->machine.asan_fake_stack_handle, obj,
3115 ec->machine.stack_start, ec->machine.stack_end,
3116 &fake_frame_start, &fake_frame_end
3117 );
3118 if (is_fake_frame) {
3119 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
3120 }
3121#endif
3122}
3123
3124static bool
3125gc_object_moved_p_internal(void *objspace, VALUE obj)
3126{
3127 if (SPECIAL_CONST_P(obj)) {
3128 return false;
3129 }
3130
3131 return rb_gc_impl_object_moved_p(objspace, obj);
3132}
3133
3134static VALUE
3135gc_location_internal(void *objspace, VALUE value)
3136{
3137 if (SPECIAL_CONST_P(value)) {
3138 return value;
3139 }
3140
3141 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
3142
3143 return rb_gc_impl_location(objspace, value);
3144}
3145
3146VALUE
3147rb_gc_location(VALUE value)
3148{
3149 return gc_location_internal(rb_gc_get_objspace(), value);
3150}
3151
3152#if defined(__wasm__)
3153
3154
3155static VALUE *rb_stack_range_tmp[2];
3156
3157static void
3158rb_mark_locations(void *begin, void *end)
3159{
3160 rb_stack_range_tmp[0] = begin;
3161 rb_stack_range_tmp[1] = end;
3162}
3163
3164void
3165rb_gc_save_machine_context(void)
3166{
3167 // no-op
3168}
3169
3170# if defined(__EMSCRIPTEN__)
3171
3172static void
3173mark_current_machine_context(const rb_execution_context_t *ec)
3174{
3175 emscripten_scan_stack(rb_mark_locations);
3176 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3177
3178 emscripten_scan_registers(rb_mark_locations);
3179 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3180}
3181# else // use Asyncify version
3182
3183static void
3184mark_current_machine_context(rb_execution_context_t *ec)
3185{
3186 VALUE *stack_start, *stack_end;
3187 SET_STACK_END;
3188 GET_STACK_BOUNDS(stack_start, stack_end, 1);
3189 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
3190
3191 rb_wasm_scan_locals(rb_mark_locations);
3192 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3193}
3194
3195# endif
3196
3197#else // !defined(__wasm__)
3198
3199void
3200rb_gc_save_machine_context(void)
3201{
3202 rb_thread_t *thread = GET_THREAD();
3203
3204 RB_VM_SAVE_MACHINE_CONTEXT(thread);
3205}
3206
3207
3208static void
3209mark_current_machine_context(const rb_execution_context_t *ec)
3210{
3211 rb_gc_mark_machine_context(ec);
3212}
3213#endif
3214
3215void
3216rb_gc_mark_machine_context(const rb_execution_context_t *ec)
3217{
3218 VALUE *stack_start, *stack_end;
3219
3220 GET_STACK_BOUNDS(stack_start, stack_end, 0);
3221 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
3222
3223 void *data =
3224#ifdef RUBY_ASAN_ENABLED
3225 /* gc_mark_machine_stack_location_maybe() uses data as const */
3226        (rb_execution_context_t *)ec;
3227#else
3228 NULL;
3229#endif
3230
3231 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3232 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3233 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3234}
3235
3236static int
3237rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3238{
3239 gc_mark_and_pin_internal((VALUE)value);
3240
3241 return ST_CONTINUE;
3242}
3243
3244void
3245rb_mark_tbl(st_table *tbl)
3246{
3247 if (!tbl || tbl->num_entries == 0) return;
3248
3249 st_foreach(tbl, rb_mark_tbl_i, 0);
3250}
3251
3252static void
3253gc_mark_tbl_no_pin(st_table *tbl)
3254{
3255 if (!tbl || tbl->num_entries == 0) return;
3256
3257 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3258}
3259
3260void
3261rb_mark_tbl_no_pin(st_table *tbl)
3262{
3263 gc_mark_tbl_no_pin(tbl);
3264}
3265
3266static bool
3267gc_declarative_marking_p(const rb_data_type_t *type)
3268{
3269 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3270}
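
/*
 * With RUBY_TYPED_DECL_MARKING set, ->function.dmark is not called but
 * reinterpreted as a RUBY_REF_END-terminated list of field offsets (see
 * TYPED_DATA_REFS_OFFSET_LIST below). An illustrative declaration, assuming
 * the RUBY_REFERENCES_* helper macros from the public headers and a
 * hypothetical struct:
 *
 *    struct my_ext { VALUE name; long count; VALUE owner; };
 *
 *    RUBY_REFERENCES_START(my_ext_refs)
 *        RUBY_REF_EDGE(struct my_ext, name),
 *        RUBY_REF_EDGE(struct my_ext, owner),
 *    RUBY_REFERENCES_END
 *
 *    static const rb_data_type_t my_ext_type = {
 *        .wrap_struct_name = "my_ext",
 *        .function = { .dmark = RUBY_REFS_LIST_PTR(my_ext_refs), },
 *        .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *    };
 */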
3271
3272static rb_execution_context_t *
3273rb_gc_get_ec(void)
3274{
3275 void *objspace = rb_gc_get_objspace();
3276
3277 if (RB_LIKELY(rb_gc_impl_during_gc_p(objspace))) {
3278 return rb_gc_impl_get_vm_context(objspace)->ec;
3279 }
3280 else {
3281 return GET_EC();
3282 }
3283}
3284
3285void
3286rb_gc_mark_roots(void *objspace, const char **categoryp)
3287{
3288 rb_execution_context_t *ec = rb_gc_get_ec();
3289 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3290
3291#define MARK_CHECKPOINT(category) do { \
3292 if (categoryp) *categoryp = category; \
3293} while (0)
3294
3295 MARK_CHECKPOINT("vm");
3296 rb_vm_mark(vm);
3297
3298 MARK_CHECKPOINT("end_proc");
3299 rb_mark_end_proc();
3300
3301 MARK_CHECKPOINT("global_tbl");
3302 rb_gc_mark_global_tbl();
3303
3304#if USE_YJIT
3305 void rb_yjit_root_mark(void); // in Rust
3306
3307 if (rb_yjit_enabled_p) {
3308 MARK_CHECKPOINT("YJIT");
3309 rb_yjit_root_mark();
3310 }
3311#endif
3312
3313#if USE_ZJIT
3314 void rb_zjit_root_mark(void);
3315 if (rb_zjit_enabled_p) {
3316 MARK_CHECKPOINT("ZJIT");
3317 rb_zjit_root_mark();
3318 }
3319#endif
3320
3321 MARK_CHECKPOINT("machine_context");
3322 mark_current_machine_context(ec);
3323
3324 MARK_CHECKPOINT("global_symbols");
3325 rb_sym_global_symbols_mark_and_move();
3326
3327 MARK_CHECKPOINT("finish");
3328
3329#undef MARK_CHECKPOINT
3330}
3331
3332struct gc_mark_classext_foreach_arg {
3333    rb_objspace_t *objspace;
3334    VALUE obj;
3335};
3336
3337static void
3338gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3339{
3340    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3341    rb_objspace_t *objspace = foreach_arg->objspace;
3342
3343 if (RCLASSEXT_SUPER(ext)) {
3344 gc_mark_internal(RCLASSEXT_SUPER(ext));
3345 }
3346 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3347
3348 if (!rb_gc_checking_shareable()) {
3349 // unshareable
3350 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3351 gc_mark_internal(RCLASSEXT_CVC_TBL(ext));
3352 }
3353
3354 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3355 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3356 }
3357 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3358 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3359 if (RCLASSEXT_SUBCLASSES(ext)) {
3360 gc_mark_internal(RCLASSEXT_SUBCLASSES(ext));
3361 }
3362 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3363}
3364
3365static void
3366gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3367{
3368    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3369    rb_objspace_t *objspace = foreach_arg->objspace;
3370
3371 if (RCLASSEXT_SUPER(ext)) {
3372 gc_mark_internal(RCLASSEXT_SUPER(ext));
3373 }
3374 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3375 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3376 }
3377 if (RCLASSEXT_INCLUDER(ext)) {
3378 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3379 }
3380 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3381 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3382 if (RCLASSEXT_SUBCLASSES(ext)) {
3383 gc_mark_internal(RCLASSEXT_SUBCLASSES(ext));
3384 }
3385}
3386
3387#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3388
3389void
3390rb_gc_move_obj_during_marking(VALUE from, VALUE to)
3391{
3392 if (rb_obj_using_gen_fields_table_p(to)) {
3393 rb_mark_generic_ivar(from);
3394 }
3395}
3396
3397void
3398rb_gc_mark_children(void *objspace, VALUE obj)
3399{
3400 struct gc_mark_classext_foreach_arg foreach_args;
3401
3402 if (rb_obj_using_gen_fields_table_p(obj)) {
3403 rb_mark_generic_ivar(obj);
3404 }
3405
3406 switch (BUILTIN_TYPE(obj)) {
3407 case T_FLOAT:
3408 case T_BIGNUM:
3409 return;
3410
3411 case T_NIL:
3412 case T_FIXNUM:
3413 rb_bug("rb_gc_mark() called for broken object");
3414 break;
3415
3416 case T_NODE:
3417 UNEXPECTED_NODE(rb_gc_mark);
3418 break;
3419
3420 case T_IMEMO:
3421 rb_imemo_mark_and_move(obj, false);
3422 return;
3423
3424 default:
3425 break;
3426 }
3427
3428 gc_mark_internal(RBASIC(obj)->klass);
3429
3430 switch (BUILTIN_TYPE(obj)) {
3431 case T_CLASS:
3432 if (FL_TEST_RAW(obj, FL_SINGLETON) &&
3433 !rb_gc_checking_shareable()) {
3434 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3435 }
3436 // Continue to the shared T_CLASS/T_MODULE
3437 case T_MODULE:
3438 foreach_args.objspace = objspace;
3439 foreach_args.obj = obj;
3440 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3441 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3442 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3443 }
3444 break;
3445
3446 case T_ICLASS:
3447 foreach_args.objspace = objspace;
3448 foreach_args.obj = obj;
3449 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3450 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3451 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3452 }
3453 break;
3454
3455 case T_ARRAY:
3456 if (ARY_SHARED_P(obj)) {
3457 VALUE root = ARY_SHARED_ROOT(obj);
3458 gc_mark_internal(root);
3459 }
3460 else {
3461 long len = RARRAY_LEN(obj);
3462 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3463 for (long i = 0; i < len; i++) {
3464 gc_mark_internal(ptr[i]);
3465 }
3466 }
3467 break;
3468
3469 case T_HASH:
3470 mark_hash(obj);
3471 break;
3472
3473 case T_SYMBOL:
3474 gc_mark_internal(RSYMBOL(obj)->fstr);
3475 break;
3476
3477 case T_STRING:
3478 if (STR_SHARED_P(obj)) {
3479 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3480 /* Embedded shared strings cannot be moved because this string
3481 * points into the slot of the shared string. There may be code
3482 * using the RSTRING_PTR on the stack, which would pin this
3483 * string but not pin the shared string, causing it to move. */
3484 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3485 }
3486 else {
3487 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3488 }
3489 }
3490 break;
3491
3492 case T_DATA: {
3493 bool typed_data = RTYPEDDATA_P(obj);
3494 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3495
3496 if (typed_data) {
3497 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3498 }
3499
3500 if (ptr) {
3501 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3502 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3503
3504 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3505 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3506 }
3507 }
3508 else {
3509 RUBY_DATA_FUNC mark_func = typed_data ?
3510                    RTYPEDDATA_TYPE(obj)->function.dmark :
3511                    RDATA(obj)->dmark;
3512 if (mark_func) (*mark_func)(ptr);
3513 }
3514 }
3515
3516 break;
3517 }
3518
3519 case T_OBJECT: {
3520 uint32_t len;
3521 if (rb_obj_shape_complex_p(obj)) {
3522 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3523 len = ROBJECT_FIELDS_COUNT_COMPLEX(obj);
3524 }
3525 else {
3526 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3527
3528 len = ROBJECT_FIELDS_COUNT_NOT_COMPLEX(obj);
3529 for (uint32_t i = 0; i < len; i++) {
3530 gc_mark_internal(ptr[i]);
3531 }
3532 }
3533 break;
3534 }
3535
3536 case T_FILE:
3537 if (RFILE(obj)->fptr) {
3538 gc_mark_internal(RFILE(obj)->fptr->self);
3539 gc_mark_internal(RFILE(obj)->fptr->pathv);
3540 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3541 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3542 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3543 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3544 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3545 gc_mark_internal(RFILE(obj)->fptr->timeout);
3546 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3547 }
3548 break;
3549
3550 case T_REGEXP:
3551 gc_mark_internal(RREGEXP(obj)->src);
3552 break;
3553
3554 case T_MATCH:
3555 gc_mark_internal(RMATCH(obj)->regexp);
3556 if (RMATCH(obj)->str) {
3557 gc_mark_internal(RMATCH(obj)->str);
3558 }
3559 break;
3560
3561 case T_RATIONAL:
3562 gc_mark_internal(RRATIONAL(obj)->num);
3563 gc_mark_internal(RRATIONAL(obj)->den);
3564 break;
3565
3566 case T_COMPLEX:
3567 gc_mark_internal(RCOMPLEX(obj)->real);
3568 gc_mark_internal(RCOMPLEX(obj)->imag);
3569 break;
3570
3571 case T_STRUCT: {
3572 const long len = RSTRUCT_LEN(obj);
3573 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3574
3575 for (long i = 0; i < len; i++) {
3576 gc_mark_internal(ptr[i]);
3577 }
3578
3579 if (rb_obj_shape_has_fields(obj) && !FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3580 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3581 }
3582
3583 break;
3584 }
3585
3586 default:
3587 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3588 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3589 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3590 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3591 BUILTIN_TYPE(obj), (void *)obj,
3592 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3593 }
3594}
3595
3596size_t
3597rb_gc_obj_optimal_size(VALUE obj)
3598{
3599 switch (BUILTIN_TYPE(obj)) {
3600 case T_ARRAY:
3601 {
3602 size_t size = rb_ary_size_as_embedded(obj);
3603 if (rb_gc_size_allocatable_p(size)) {
3604 return size;
3605 }
3606 else {
3607 return sizeof(struct RArray);
3608 }
3609 }
3610
3611 case T_OBJECT:
3612 if (rb_obj_shape_complex_p(obj)) {
3613 return sizeof(struct RObject);
3614 }
3615 else {
3616 size_t size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3617 if (rb_gc_size_allocatable_p(size)) {
3618 return size;
3619 }
3620 else {
3621 return sizeof(struct RObject);
3622 }
3623 }
3624
3625 case T_STRING:
3626 {
3627 size_t size = rb_str_size_as_embedded(obj);
3628 if (rb_gc_size_allocatable_p(size)) {
3629 return size;
3630 }
3631 else {
3632 return sizeof(struct RString);
3633 }
3634 }
3635
3636 case T_HASH:
3637 {
3638 if (RB_OBJ_FROZEN(obj) && RHASH_AR_TABLE_P(obj)) {
3639 return sizeof(struct RHash) + offsetof(ar_table, pairs) + RHASH_AR_TABLE_BOUND(obj) * sizeof(ar_table_pair);
3640 }
3641 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3642 }
3643
3644 default:
3645 return 0;
3646 }
3647}
3648
3649void
3650rb_gc_writebarrier(VALUE a, VALUE b)
3651{
3652 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3653}
3654
3655void
3656rb_gc_writebarrier_unprotect(VALUE obj)
3657{
3658 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3659}
3660
3661/*
3662 * remember `obj' if needed.
3663 */
3664void
3665rb_gc_writebarrier_remember(VALUE obj)
3666{
3667 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3668}
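
/*
 * The usual entry point into these barriers from C extensions is
 * RB_OBJ_WRITE(), which performs the store and fires rb_gc_writebarrier()
 * in one step (field name illustrative):
 *
 *    RB_OBJ_WRITE(self, &ptr->owner, new_owner);  // not: ptr->owner = new_owner;
 */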
3669
3670void
3671rb_gc_copy_attributes(VALUE dest, VALUE obj)
3672{
3673 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3674}
3675
3676int
3677rb_gc_modular_gc_loaded_p(void)
3678{
3679#if USE_MODULAR_GC
3680 return rb_gc_functions.modular_gc_loaded_p;
3681#else
3682 return false;
3683#endif
3684}
3685
3686const char *
3687rb_gc_active_gc_name(void)
3688{
3689 const char *gc_name = rb_gc_impl_active_gc_name();
3690
3691 const size_t len = strlen(gc_name);
3692 if (len > RB_GC_MAX_NAME_LEN) {
3693 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3694 RB_GC_MAX_NAME_LEN, len, gc_name);
3695 }
3696
3697 return gc_name;
3698}
3699
3700struct rb_gc_object_metadata_entry *
3701rb_gc_object_metadata(VALUE obj)
3702{
3703 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3704}
3705
3706/* GC */
3707
3708void *
3709rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3710{
3711 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3712}
3713
3714void
3715rb_gc_ractor_cache_free(void *cache)
3716{
3717 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3718}
3719
3720void
3721rb_gc_register_mark_object(VALUE obj)
3722{
3723 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3724 return;
3725
3726 rb_vm_register_global_object(obj);
3727}
3728
3729void
3730rb_gc_register_address(VALUE *addr)
3731{
3732 rb_vm_t *vm = GET_VM();
3733
3734 VALUE obj = *addr;
3735
3736 struct global_object_list *tmp = ALLOC(struct global_object_list);
3737 RB_VM_LOCKING() {
3738 tmp->next = vm->global_object_list;
3739 tmp->varptr = addr;
3740 vm->global_object_list = tmp;
3741 }
3742
3743 /*
3744 * Because some C extensions have assignment-then-register bugs,
3745     * we defensively guard `obj` here so that it does not get swept in the meantime.
3746 */
3747 RB_GC_GUARD(obj);
3748 if (0 && !SPECIAL_CONST_P(obj)) {
3749 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3750 rb_obj_class(obj));
3751 rb_print_backtrace(stderr);
3752 }
3753}
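
/*
 * Typical extension usage (register first, then assign, avoiding the
 * assignment-then-register hazard noted above):
 *
 *    static VALUE cache = Qnil;
 *
 *    void
 *    Init_my_ext(void)
 *    {
 *        rb_gc_register_address(&cache);  // rb_global_variable() is equivalent
 *        cache = rb_hash_new();
 *    }
 */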
3754
3755void
3756rb_gc_unregister_address(VALUE *addr)
3757{
3758 rb_vm_t *vm = GET_VM();
3759 struct global_object_list *tmp;
3760 RB_VM_LOCKING() {
3761 tmp = vm->global_object_list;
3762 if (tmp->varptr == addr) {
3763 vm->global_object_list = tmp->next;
3764 SIZED_FREE(tmp);
3765 }
3766 else {
3767 while (tmp->next) {
3768 if (tmp->next->varptr == addr) {
3769 struct global_object_list *t = tmp->next;
3770
3771 tmp->next = tmp->next->next;
3772 SIZED_FREE(t);
3773 break;
3774 }
3775 tmp = tmp->next;
3776 }
3777 }
3778 }
3779}
3780
3781void
3782rb_global_variable(VALUE *var)
3783{
3784 rb_gc_register_address(var);
3785}
3786
3787static VALUE
3788gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3789{
3790 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3791
3792 return Qnil;
3793}
3794
3795/*
3796 * rb_objspace_each_objects() is a special C API to walk through the
3797 * Ruby object space. This C API is difficult to use correctly.
3798 * Frankly, you should not use it unless you have read the
3799 * source code of this function and understand exactly what it does.
3800 *
3801 * 'callback' will be called several times (the number of heap page,
3802 * at current implementation) with:
3803 * vstart: a pointer to the first living object of the heap_page.
3804 * vend: a pointer to next to the valid heap_page area.
3805 * stride: a distance to next VALUE.
3806 *
3807 * If callback() returns non-zero, the iteration will be stopped.
3808 *
3809 * This is sample callback code to iterate over living objects:
3810 *
3811 * static int
3812 * sample_callback(void *vstart, void *vend, int stride, void *data)
3813 * {
3814 * VALUE v = (VALUE)vstart;
3815 * for (; v != (VALUE)vend; v += stride) {
3816 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3817 * // do something with live object 'v'
3818 * }
3819 * }
3820 * return 0; // continue to iteration
3821 * }
3822 *
3823 * Note: 'vstart' is not the top of the heap_page.  It points to the first
3824 *       living object, so that at least one live object is anchored, to avoid GC issues.
3825 *       This means that you cannot walk through all Ruby object pages,
3826 *       including freed object pages.
3827 *
3828 * Note: On this implementation, 'stride' is the same as sizeof(RVALUE).
3829 *       However, a different 'stride' value may be passed
3830 *       for various reasons.  You must use the given stride instead of
3831 *       a hard-coded constant value in the iteration.
3832 */
3833void
3834rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3835{
3836 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3837}
3838
3839static void
3840gc_ref_update_array(void *objspace, VALUE v)
3841{
3842 if (ARY_SHARED_P(v)) {
3843 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3844
3845 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3846
3847 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3848 // If the root is embedded and its location has changed
3849 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3850 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3851 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3852 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3853 }
3854 }
3855 else {
3856 long len = RARRAY_LEN(v);
3857
3858 if (len > 0) {
3859 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3860 for (long i = 0; i < len; i++) {
3861 UPDATE_IF_MOVED(objspace, ptr[i]);
3862 }
3863 }
3864
3865 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3866 if (rb_ary_embeddable_p(v)) {
3867 rb_ary_make_embedded(v);
3868 }
3869 }
3870 }
3871}
3872
3873static void
3874gc_ref_update_object(void *objspace, VALUE v)
3875{
3876 VALUE *ptr = ROBJECT_FIELDS(v);
3877
3878 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3879 if (rb_obj_shape_complex_p(v)) {
3880 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3881 return;
3882 }
3883
3884 size_t slot_size = rb_gc_obj_slot_size(v);
3885 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3886 if (slot_size >= embed_size) {
3887 // Object can be re-embedded
3888 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3889 SIZED_FREE_N(ptr, ROBJECT_FIELDS_CAPACITY(v));
3890 FL_UNSET_RAW(v, ROBJECT_HEAP);
3891 ptr = ROBJECT(v)->as.ary;
3892 }
3893 }
3894
3895 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3896 UPDATE_IF_MOVED(objspace, ptr[i]);
3897 }
3898}
3899
3900void
3901rb_gc_ref_update_table_values_only(st_table *tbl)
3902{
3903 gc_ref_update_table_values_only(tbl);
3904}
3905
3906/* Update MOVED references in a VALUE=>VALUE st_table */
3907void
3908rb_gc_update_tbl_refs(st_table *ptr)
3909{
3910 gc_update_table_refs(ptr);
3911}
3912
3913static void
3914gc_ref_update_hash(void *objspace, VALUE v)
3915{
3916 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3917}
3918
3919static void
3920gc_update_values(void *objspace, long n, VALUE *values)
3921{
3922 for (long i = 0; i < n; i++) {
3923 UPDATE_IF_MOVED(objspace, values[i]);
3924 }
3925}
3926
3927void
3928rb_gc_update_values(long n, VALUE *values)
3929{
3930 gc_update_values(rb_gc_get_objspace(), n, values);
3931}
3932
3933static enum rb_id_table_iterator_result
3934check_id_table_move(VALUE value, void *data)
3935{
3936 void *objspace = (void *)data;
3937
3938 if (gc_object_moved_p_internal(objspace, (VALUE)value)) {
3939 return ID_TABLE_REPLACE;
3940 }
3941
3942 return ID_TABLE_CONTINUE;
3943}
3944
3945void
3946rb_gc_prepare_heap_process_object(VALUE obj)
3947{
3948 switch (BUILTIN_TYPE(obj)) {
3949 case T_STRING:
3950        // Precompute the string coderange. This both saves time for when it will
3951        // eventually be needed, and avoids mutating heap pages after a potential fork.
3952 rb_enc_str_coderange(obj);
3953 break;
3954 default:
3955 break;
3956 }
3957}
3958
3959void
3960rb_gc_prepare_heap(void)
3961{
3962 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3963}
3964
3965size_t
3966rb_gc_heap_id_for_size(size_t size)
3967{
3968 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3969}
3970
3971bool
3972rb_gc_size_allocatable_p(size_t size)
3973{
3974 return rb_gc_impl_size_allocatable_p(size);
3975}
3976
3977static enum rb_id_table_iterator_result
3978update_id_table(VALUE *value, void *data, int existing)
3979{
3980 void *objspace = (void *)data;
3981
3982 if (gc_object_moved_p_internal(objspace, (VALUE)*value)) {
3983 *value = gc_location_internal(objspace, (VALUE)*value);
3984 }
3985
3986 return ID_TABLE_CONTINUE;
3987}
3988
3989static void
3990update_m_tbl(void *objspace, struct rb_id_table *tbl)
3991{
3992 if (tbl) {
3993 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3994 }
3995}
3996
3997static enum rb_id_table_iterator_result
3998update_const_tbl_i(VALUE value, void *objspace)
3999{
4000 rb_const_entry_t *ce = (rb_const_entry_t *)value;
4001
4002 if (gc_object_moved_p_internal(objspace, ce->value)) {
4003 ce->value = gc_location_internal(objspace, ce->value);
4004 }
4005
4006 if (gc_object_moved_p_internal(objspace, ce->file)) {
4007 ce->file = gc_location_internal(objspace, ce->file);
4008 }
4009
4010 return ID_TABLE_CONTINUE;
4011}
4012
4013static void
4014update_const_tbl(void *objspace, struct rb_id_table *tbl)
4015{
4016 if (!tbl) return;
4017 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
4018}
4019
4020static void
4021update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
4022{
4023 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
4024 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
4025 for (size_t i = 0; i < array_size; i++) {
4026 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
4027 }
4028 }
4029}
4030
4031static void
4032update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
4033{
4034 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
4035 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
4036 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
4037 if (is_iclass) {
4038 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
4039 }
4040}
4041
4042static void
4043update_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
4044{
4045 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
4046 rb_objspace_t *objspace = args->objspace;
4047
4048 if (RCLASSEXT_SUPER(ext)) {
4049 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
4050 }
4051
4052 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
4053
4054 UPDATE_IF_MOVED(objspace, ext->fields_obj);
4055 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
4056 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
4057 }
4058 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
4059 UPDATE_IF_MOVED(objspace, RCLASSEXT_CVC_TBL(ext));
4060 update_superclasses(objspace, ext);
4061 if (RCLASSEXT_SUBCLASSES(ext)) {
4062 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUBCLASSES(ext));
4063 }
4064
4065 update_classext_values(objspace, ext, false);
4066}
4067
4068static void
4069update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
4070{
4071 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
4072 rb_objspace_t *objspace = args->objspace;
4073
4074 if (RCLASSEXT_SUPER(ext)) {
4075 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
4076 }
4077 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
4078 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
4079 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
4080 UPDATE_IF_MOVED(objspace, RCLASSEXT_CVC_TBL(ext));
4081 if (RCLASSEXT_SUBCLASSES(ext)) {
4082 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUBCLASSES(ext));
4083 }
4084
4085 update_classext_values(objspace, ext, true);
4086}
4087
4088struct global_vm_table_foreach_data {
4089 vm_table_foreach_callback_func callback;
4090 vm_table_update_callback_func update_callback;
4091 void *data;
4092 bool weak_only;
4093};
4094
4095static int
4096vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
4097{
4098 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4099
4100 int ret = iter_data->callback((VALUE)key, iter_data->data);
4101
4102 if (!iter_data->weak_only) {
4103 if (ret != ST_CONTINUE) return ret;
4104
4105 ret = iter_data->callback((VALUE)value, iter_data->data);
4106 }
4107
4108 return ret;
4109}
4110
4111static int
4112vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4113{
4114 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4115
4116 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
4117
4118 if (!iter_data->weak_only) {
4119 if (ret != ST_CONTINUE) return ret;
4120
4121 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
4122 }
4123
4124 return ret;
4125}
4126
4127static int
4128vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
4129{
4130 VALUE sym = *sym_ptr;
4131 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4132
4133 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
4134
4135 int ret = iter_data->callback(sym, iter_data->data);
4136
4137 if (ret == ST_REPLACE) {
4138 ret = iter_data->update_callback(sym_ptr, iter_data->data);
4139 }
4140
4141 return ret;
4142}
4143
4144struct st_table *rb_generic_fields_tbl_get(void);
4145
4146static int
4147vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
4148{
4149 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4150
4151 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
4152 int ret = iter_data->callback((VALUE)key, iter_data->data);
4153 if (ret != ST_CONTINUE) return ret;
4154 }
4155
4156 return iter_data->callback((VALUE)value, iter_data->data);
4157}
4158
4159static int
4160vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4161{
4162 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4163
4164 iter_data->update_callback((VALUE *)value, iter_data->data);
4165
4166 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
4167 iter_data->update_callback((VALUE *)key, iter_data->data);
4168 }
4169
4170 return ST_CONTINUE;
4171}
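/* Note: in the id2ref table, small object IDs are FIXNUM immediates, while
 * larger IDs are heap-allocated Bignums; that is why the non-FIXNUM keys
 * above must be visited (and updated) like any other reference. */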
4172
4173static int
4174vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
4175{
4176 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4177
4178 int ret = iter_data->callback((VALUE)key, iter_data->data);
4179
4180 VALUE new_value = (VALUE)value;
4181 VALUE new_key = (VALUE)key;
4182
4183 switch (ret) {
4184 case ST_CONTINUE:
4185 break;
4186
4187 case ST_DELETE:
4188 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
4189 return ST_DELETE;
4190
4191 case ST_REPLACE: {
4192 ret = iter_data->update_callback(&new_key, iter_data->data);
4193 if (key != new_key) {
4194 ret = ST_DELETE;
4195 }
4196 break;
4197 }
4198
4199 default:
4200 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
4201 }
4202
4203 if (!iter_data->weak_only) {
4204 int ivar_ret = iter_data->callback(new_value, iter_data->data);
4205 switch (ivar_ret) {
4206 case ST_CONTINUE:
4207 break;
4208
4209 case ST_REPLACE:
4210 iter_data->update_callback(&new_value, iter_data->data);
4211 break;
4212
4213 default:
4214 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
4215 }
4216 }
4217
4218 if (key != new_key || value != new_value) {
4219 DURING_GC_COULD_MALLOC_REGION_START();
4220 {
4221 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
4222 }
4223 DURING_GC_COULD_MALLOC_REGION_END();
4224 }
4225
4226 return ret;
4227}
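/* The generic fields table is keyed by object address, so once a key object
 * moves, the stale entry is deleted (ST_DELETE above) and re-inserted under
 * the new address; st_insert() may allocate, hence the guard region. */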
4228
4229static int
4230vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
4231{
4232
4233 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4234 int retval = iter_data->callback(*str, iter_data->data);
4235
4236 if (retval == ST_REPLACE) {
4237 retval = iter_data->update_callback(str, iter_data->data);
4238 }
4239
4240 if (retval == ST_DELETE) {
4241 FL_UNSET(*str, RSTRING_FSTR);
4242 }
4243
4244 return retval;
4245}
4246
4247void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
4248void
4249rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4250 vm_table_update_callback_func update_callback,
4251 void *data,
4252 bool weak_only,
4253 enum rb_gc_vm_weak_tables table)
4254{
4255 rb_vm_t *vm = GET_VM();
4256
4257 struct global_vm_table_foreach_data foreach_data = {
4258 .callback = callback,
4259 .update_callback = update_callback,
4260 .data = data,
4261 .weak_only = weak_only,
4262 };
4263
4264 switch (table) {
4265 case RB_GC_VM_CI_TABLE: {
4266 st_foreach_with_replace(
4267 &vm->ci_table,
4268 vm_weak_table_foreach_weak_key,
4269 vm_weak_table_foreach_update_weak_key,
4270 (st_data_t)&foreach_data
4271 );
4272 break;
4273 }
4274 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4275 st_foreach_with_replace(
4276 &vm->overloaded_cme_table,
4277 vm_weak_table_foreach_weak_key,
4278 vm_weak_table_foreach_update_weak_key,
4279 (st_data_t)&foreach_data
4280 );
4281 break;
4282 }
4283 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4284 rb_sym_global_symbol_table_foreach_weak_reference(
4285 vm_weak_table_sym_set_foreach,
4286 &foreach_data
4287 );
4288 break;
4289 }
4290 case RB_GC_VM_ID2REF_TABLE: {
4291 if (id2ref_tbl) {
4292 st_foreach_with_replace(
4293 id2ref_tbl,
4294 vm_weak_table_id2ref_foreach,
4295 vm_weak_table_id2ref_foreach_update,
4296 (st_data_t)&foreach_data
4297 );
4298 }
4299 break;
4300 }
4301 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4302 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4303 if (generic_fields_tbl) {
4304 st_foreach(
4305 generic_fields_tbl,
4306 vm_weak_table_gen_fields_foreach,
4307 (st_data_t)&foreach_data
4308 );
4309 }
4310 break;
4311 }
4312 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4313 rb_fstring_foreach_with_replace(
4314 vm_weak_table_frozen_strings_foreach,
4315 &foreach_data
4316 );
4317 break;
4318 }
4319 case RB_GC_VM_WEAK_TABLE_COUNT:
4320 rb_bug("Unreachable");
4321 default:
4322 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4323 }
4324}
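/* A GC implementation typically drives this with a callback pair along these
 * lines (sketch; is_marked/is_moved/new_location are illustrative helpers):
 *
 *   static int
 *   keep_if_marked(VALUE obj, void *data)
 *   {
 *       if (!is_marked(data, obj)) return ST_DELETE;  // drop dead entry
 *       if (is_moved(data, obj)) return ST_REPLACE;   // request an update
 *       return ST_CONTINUE;
 *   }
 *
 *   static int
 *   update_location(VALUE *slot, void *data)
 *   {
 *       *slot = new_location(data, *slot);
 *       return ST_CONTINUE;
 *   }
 *
 *   rb_gc_vm_weak_table_foreach(keep_if_marked, update_location, data,
 *                               true, RB_GC_VM_CI_TABLE);
 */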
4325
4326void
4327rb_gc_update_vm_references(void *objspace)
4328{
4329 rb_execution_context_t *ec = GET_EC();
4330 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4331
4332 rb_vm_update_references(vm);
4333 rb_gc_update_global_tbl();
4334 rb_sym_global_symbols_mark_and_move();
4335
4336#if USE_YJIT
4337 void rb_yjit_root_update_references(void); // in Rust
4338
4339 if (rb_yjit_enabled_p) {
4340 rb_yjit_root_update_references();
4341 }
4342#endif
4343
4344#if USE_ZJIT
4345 void rb_zjit_root_update_references(void); // in Rust
4346
4347 if (rb_zjit_enabled_p) {
4348 rb_zjit_root_update_references();
4349 }
4350#endif
4351}
4352
4353void
4354rb_gc_update_object_references(void *objspace, VALUE obj)
4355{
4356 struct classext_foreach_args args;
4357
4358 switch (BUILTIN_TYPE(obj)) {
4359 case T_CLASS:
4360 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4361 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4362 }
4363 // Fall through to the shared T_CLASS/T_MODULE handling below.
4364 case T_MODULE:
4365 args.klass = obj;
4366 args.objspace = objspace;
4367 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4368 break;
4369
4370 case T_ICLASS:
4371 args.objspace = objspace;
4372 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4373 break;
4374
4375 case T_IMEMO:
4376 rb_imemo_mark_and_move(obj, true);
4377 return;
4378
4379 case T_NIL:
4380 case T_FIXNUM:
4381 case T_NODE:
4382 case T_MOVED:
4383 case T_NONE:
4384 /* These can't move */
4385 return;
4386
4387 case T_ARRAY:
4388 gc_ref_update_array(objspace, obj);
4389 break;
4390
4391 case T_HASH:
4392 gc_ref_update_hash(objspace, obj);
4393 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4394 break;
4395
4396 case T_STRING:
4397 {
4398 if (STR_SHARED_P(obj)) {
4399 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4400 }
4401
4402 /* If, after moving, the string is not embedded but can fit in the
4403 * slot it has been placed in, then re-embed it. */
4404 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4405 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4406 rb_str_make_embedded(obj);
4407 }
4408 }
4409
4410 break;
4411 }
4412 case T_DATA:
4413 /* Call the compaction callback, if it exists */
4414 {
4415 bool typed_data = RTYPEDDATA_P(obj);
4416 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4417
4418 if (typed_data) {
4419 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4420 }
4421
4422 if (ptr) {
4423 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4424 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4425
4426 for (size_t offset = *offset_list++; offset != RUBY_REF_END; offset = *offset_list++) { /* advance on init too, so the first offset is not visited twice */
4427 VALUE *ref = (VALUE *)((char *)ptr + offset);
4428 *ref = gc_location_internal(objspace, *ref);
4429 }
4430 }
4431 else if (typed_data) {
4432 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4433 if (compact_func) (*compact_func)(ptr);
4434 }
4435 }
4436 }
4437 break;
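 /* For reference, a typed-data extension can make itself compaction-safe
  * either with a dcompact callback or by declaring its reference offsets,
  * roughly as below (sketch; struct my_data / my_type are illustrative, and
  * the RUBY_REFERENCES macros come from the public headers):
  *
  *   struct my_data { VALUE name; VALUE owner; };
  *
  *   RUBY_REFERENCES(my_refs) = {
  *       RUBY_REF_EDGE(struct my_data, name),
  *       RUBY_REF_EDGE(struct my_data, owner),
  *       RUBY_REF_END
  *   };
  *
  *   static const rb_data_type_t my_type = {
  *       "my_data",
  *       { RUBY_REFS_LIST_PTR(my_refs), RUBY_TYPED_DEFAULT_FREE, NULL, },
  *       0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_DECL_MARKING
  *   };
  */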
4438
4439 case T_OBJECT:
4440 gc_ref_update_object(objspace, obj);
4441 break;
4442
4443 case T_FILE:
4444 if (RFILE(obj)->fptr) {
4445 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4446 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4447 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4448 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4449 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4450 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4451 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4452 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4453 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4454 }
4455 break;
4456 case T_REGEXP:
4457 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4458 break;
4459
4460 case T_SYMBOL:
4461 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4462 break;
4463
4464 case T_FLOAT:
4465 case T_BIGNUM:
4466 break;
4467
4468 case T_MATCH:
4469 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4470
4471 if (RMATCH(obj)->str) {
4472 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4473 }
4474 break;
4475
4476 case T_RATIONAL:
4477 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4478 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4479 break;
4480
4481 case T_COMPLEX:
4482 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4483 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4484
4485 break;
4486
4487 case T_STRUCT:
4488 {
4489 long i, len = RSTRUCT_LEN(obj);
4490 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4491
4492 for (i = 0; i < len; i++) {
4493 UPDATE_IF_MOVED(objspace, ptr[i]);
4494 }
4495
4496 if (RSTRUCT_EMBED_LEN(obj)) {
4497 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4498 UPDATE_IF_MOVED(objspace, ptr[len]);
4499 }
4500 }
4501 else {
4502 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4503 }
4504 }
4505 break;
4506 default:
4507 rb_bug("unreachable");
4508 break;
4509 }
4510
4511 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4512}
4513
4514VALUE
4515rb_gc_start(void)
4516{
4517 rb_gc();
4518 return Qnil;
4519}
4520
4521void
4522rb_gc(void)
4523{
4524 unless_objspace(objspace) { return; }
4525
4526 rb_gc_impl_start(objspace, true, true, true, false);
4527}
4528
4529int
4530rb_during_gc(void)
4531{
4532 unless_objspace(objspace) { return FALSE; }
4533
4534 return rb_gc_impl_during_gc_p(objspace);
4535}
4536
4537size_t
4538rb_gc_count(void)
4539{
4540 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4541}
4542
4543static VALUE
4544gc_count(rb_execution_context_t *ec, VALUE self)
4545{
4546 return SIZET2NUM(rb_gc_count());
4547}
4548
4549VALUE
4550rb_gc_latest_gc_info(VALUE key)
4551{
4552 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4553 rb_raise(rb_eTypeError, "non-hash or symbol given");
4554 }
4555
4556 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4557
4558 if (val == Qundef) {
4559 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4560 }
4561
4562 return val;
4563}
4564
4565static VALUE
4566gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4567{
4568 if (NIL_P(arg)) {
4569 arg = rb_hash_new();
4570 }
4571 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4572 rb_raise(rb_eTypeError, "non-hash or symbol given");
4573 }
4574
4575 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4576
4577 if (ret == Qundef) {
4578 GC_ASSERT(SYMBOL_P(arg));
4579
4580 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4581 }
4582
4583 return ret;
4584}
4585
4586size_t
4587rb_gc_stat(VALUE arg)
4588{
4589 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4590 rb_raise(rb_eTypeError, "non-hash or symbol given");
4591 }
4592
4593 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4594
4595 if (ret == Qundef) {
4596 GC_ASSERT(SYMBOL_P(arg));
4597
4598 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4599 }
4600
4601 if (SYMBOL_P(arg)) {
4602 return NUM2SIZET(ret);
4603 }
4604 else {
4605 return 0;
4606 }
4607}
4608
4609static VALUE
4610gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4611{
4612 if (NIL_P(arg)) {
4613 arg = rb_hash_new();
4614 }
4615
4616 if (NIL_P(heap_name)) {
4617 if (!RB_TYPE_P(arg, T_HASH)) {
4618 rb_raise(rb_eTypeError, "non-hash given");
4619 }
4620 }
4621 else if (FIXNUM_P(heap_name)) {
4622 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4623 rb_raise(rb_eTypeError, "non-hash or symbol given");
4624 }
4625 }
4626 else {
4627 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4628 }
4629
4630 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4631
4632 if (ret == Qundef) {
4633 GC_ASSERT(SYMBOL_P(arg));
4634
4635 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4636 }
4637
4638 return ret;
4639}
4640
4641static VALUE
4642gc_config_get(rb_execution_context_t *ec, VALUE self)
4643{
4644 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4645 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4646
4647 return cfg_hash;
4648}
4649
4650static VALUE
4651gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4652{
4653 void *objspace = rb_gc_get_objspace();
4654
4655 rb_gc_impl_config_set(objspace, hash);
4656
4657 return Qnil;
4658}
4659
4660static VALUE
4661gc_stress_get(rb_execution_context_t *ec, VALUE self)
4662{
4663 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4664}
4665
4666static VALUE
4667gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4668{
4669 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4670
4671 return flag;
4672}
4673
4674void
4675rb_gc_initial_stress_set(VALUE flag)
4676{
4677 initial_stress = flag;
4678}
4679
4680size_t *
4681rb_gc_heap_sizes(void)
4682{
4683 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4684}
4685
4686VALUE
4687rb_gc_enable(void)
4688{
4689 return rb_objspace_gc_enable(rb_gc_get_objspace());
4690}
4691
4692VALUE
4693rb_objspace_gc_enable(void *objspace)
4694{
4695 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4696 rb_gc_impl_gc_enable(objspace);
4697 return RBOOL(disabled);
4698}
4699
4700static VALUE
4701gc_enable(rb_execution_context_t *ec, VALUE _)
4702{
4703 return rb_gc_enable();
4704}
4705
4706static VALUE
4707gc_disable_no_rest(void *objspace)
4708{
4709 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4710 rb_gc_impl_gc_disable(objspace, false);
4711 return RBOOL(disabled);
4712}
4713
4714VALUE
4715rb_gc_disable_no_rest(void)
4716{
4717 return gc_disable_no_rest(rb_gc_get_objspace());
4718}
4719
4720VALUE
4721rb_gc_disable(void)
4722{
4723 return rb_objspace_gc_disable(rb_gc_get_objspace());
4724}
4725
4726VALUE
4727rb_objspace_gc_disable(void *objspace)
4728{
4729 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4730 rb_gc_impl_gc_disable(objspace, true);
4731 return RBOOL(disabled);
4732}
4733
4734static VALUE
4735gc_disable(rb_execution_context_t *ec, VALUE _)
4736{
4737 return rb_gc_disable();
4738}
4739
4740// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4741void
4742ruby_gc_set_params(void)
4743{
4744 rb_gc_impl_set_params(rb_gc_get_objspace());
4745}
4746
4747void
4748rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4749{
4750 RB_VM_LOCKING() {
4751 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4752
4753 if (!RB_SPECIAL_CONST_P(obj)) {
4754 rb_vm_t *vm = GET_VM();
4755 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4756 struct gc_mark_func_data_struct mfd = {
4757 .mark_func = func,
4758 .data = data,
4759 };
4760
4761 vm->gc.mark_func_data = &mfd;
4762 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4763 vm->gc.mark_func_data = prev_mfd;
4764 }
4765 }
4766}
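/* Sketch of a caller (print_child is an illustrative name): dump every
 * object directly reachable from obj.
 *
 *   static void
 *   print_child(VALUE child, void *data)
 *   {
 *       rb_obj_info_dump(child);
 *   }
 *
 *   rb_objspace_reachable_objects_from(obj, print_child, NULL);
 */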
4767
4768struct root_objects_data {
4769 const char *category;
4770 void (*func)(const char *category, VALUE, void *);
4771 void *data;
4772};
4773
4774static void
4775root_objects_from(VALUE obj, void *ptr)
4776{
4777 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4778 (*data->func)(data->category, obj, data->data);
4779}
4780
4781void
4782rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4783{
4784 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4785
4786 rb_vm_t *vm = GET_VM();
4787
4788 struct root_objects_data data = {
4789 .func = func,
4790 .data = passing_data,
4791 };
4792
4793 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4794 struct gc_mark_func_data_struct mfd = {
4795 .mark_func = root_objects_from,
4796 .data = &data,
4797 };
4798
4799 vm->gc.mark_func_data = &mfd;
4800 rb_gc_save_machine_context();
4801 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4802 vm->gc.mark_func_data = prev_mfd;
4803}
4804
4805/*
4806 ------------------------------ DEBUG ------------------------------
4807*/
4808
4809static const char *
4810type_name(int type, VALUE obj)
4811{
4812 switch (type) {
4813#define TYPE_NAME(t) case (t): return #t;
4814 TYPE_NAME(T_NONE);
4815 TYPE_NAME(T_OBJECT);
4816 TYPE_NAME(T_CLASS);
4817 TYPE_NAME(T_MODULE);
4818 TYPE_NAME(T_FLOAT);
4819 TYPE_NAME(T_STRING);
4820 TYPE_NAME(T_REGEXP);
4821 TYPE_NAME(T_ARRAY);
4822 TYPE_NAME(T_HASH);
4823 TYPE_NAME(T_STRUCT);
4824 TYPE_NAME(T_BIGNUM);
4825 TYPE_NAME(T_FILE);
4826 TYPE_NAME(T_MATCH);
4827 TYPE_NAME(T_COMPLEX);
4828 TYPE_NAME(T_RATIONAL);
4829 TYPE_NAME(T_NIL);
4830 TYPE_NAME(T_TRUE);
4831 TYPE_NAME(T_FALSE);
4832 TYPE_NAME(T_SYMBOL);
4833 TYPE_NAME(T_FIXNUM);
4834 TYPE_NAME(T_UNDEF);
4835 TYPE_NAME(T_IMEMO);
4836 TYPE_NAME(T_ICLASS);
4837 TYPE_NAME(T_MOVED);
4838 TYPE_NAME(T_ZOMBIE);
4839 case T_DATA:
4840 if (obj && rb_objspace_data_type_name(obj)) {
4841 return rb_objspace_data_type_name(obj);
4842 }
4843 return "T_DATA";
4844#undef TYPE_NAME
4845 }
4846 return "unknown";
4847}
4848
4849static const char *
4850obj_type_name(VALUE obj)
4851{
4852 return type_name(TYPE(obj), obj);
4853}
4854
4855const char *
4856rb_method_type_name(rb_method_type_t type)
4857{
4858 switch (type) {
4859 case VM_METHOD_TYPE_ISEQ: return "iseq";
4860 case VM_METHOD_TYPE_ATTRSET: return "attrset";
4861 case VM_METHOD_TYPE_IVAR: return "ivar";
4862 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4863 case VM_METHOD_TYPE_ALIAS: return "alias";
4864 case VM_METHOD_TYPE_REFINED: return "refined";
4865 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4866 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4867 case VM_METHOD_TYPE_MISSING: return "missing";
4868 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4869 case VM_METHOD_TYPE_UNDEF: return "undef";
4870 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4871 }
4872 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4873}
4874
4875static void
4876rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4877{
4878 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4879 VALUE path = rb_iseq_path(iseq);
4880 int n = ISEQ_BODY(iseq)->location.first_lineno;
4881 snprintf(buff, buff_size, " %s@%s:%d",
4882 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4883 RSTRING_PTR(path), n);
4884 }
4885}
4886
4887static int
4888str_len_no_raise(VALUE str)
4889{
4890 long len = RSTRING_LEN(str);
4891 if (len < 0) return 0;
4892 if (len > INT_MAX) return INT_MAX;
4893 return (int)len;
4894}
4895
4896#define BUFF_ARGS buff + pos, buff_size - pos
4897#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4898#define APPEND_S(s) do { \
4899 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4900 goto end; \
4901 } \
4902 else { \
4903 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); pos += rb_strlen_lit(s); \
4904 } \
4905 } while (0)
4906#define C(c, s) ((c) != 0 ? (s) : " ")
4907
4908static size_t
4909rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4910{
4911 size_t pos = 0;
4912
4913 if (SPECIAL_CONST_P(obj)) {
4914 APPEND_F("%s", obj_type_name(obj));
4915
4916 if (FIXNUM_P(obj)) {
4917 APPEND_F(" %ld", FIX2LONG(obj));
4918 }
4919 else if (SYMBOL_P(obj)) {
4920 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4921 }
4922 }
4923 else {
4924 // const int age = RVALUE_AGE_GET(obj);
4925
4926 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4927 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4928 // TODO: fixme
4929 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4930 // (void *)obj, age,
4931 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4932 // C(RVALUE_MARK_BITMAP(obj), "M"),
4933 // C(RVALUE_PIN_BITMAP(obj), "P"),
4934 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4935 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4936 // C(rb_objspace_garbage_object_p(obj), "G"),
4937 // obj_type_name(obj));
4938 }
4939 else {
4940 /* fake */
4941 // APPEND_F("%p [%dXXXX] %s",
4942 // (void *)obj, age,
4943 // obj_type_name(obj));
4944 }
4945
4946 if (internal_object_p(obj)) {
4947 /* ignore */
4948 }
4949 else if (RBASIC(obj)->klass == 0) {
4950 APPEND_S("(temporary internal)");
4951 }
4952 else if (RTEST(RBASIC(obj)->klass)) {
4953 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4954 if (!NIL_P(class_path)) {
4955 APPEND_F("%s ", RSTRING_PTR(class_path));
4956 }
4957 }
4958 }
4959 end:
4960
4961 return pos;
4962}
4963
4964const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4965
4966static size_t
4967rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4968{
4969 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4970 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4971
4972 switch (type) {
4973 case T_NODE:
4974 UNEXPECTED_NODE(rb_raw_obj_info);
4975 break;
4976 case T_ARRAY:
4977 if (ARY_SHARED_P(obj)) {
4978 APPEND_S("shared -> ");
4979 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4980 }
4981 else {
4982 APPEND_F("[%s%s%s] ",
4983 C(ARY_EMBED_P(obj), "E"),
4984 C(ARY_SHARED_P(obj), "S"),
4985 C(ARY_SHARED_ROOT_P(obj), "R"));
4986
4987 if (ARY_EMBED_P(obj)) {
4988 APPEND_F("len: %ld (embed)",
4989 RARRAY_LEN(obj));
4990 }
4991 else {
4992 APPEND_F("len: %ld, capa:%ld ptr:%p",
4993 RARRAY_LEN(obj),
4994 RARRAY(obj)->as.heap.aux.capa,
4995 (void *)RARRAY_CONST_PTR(obj));
4996 }
4997 }
4998 break;
4999 case T_STRING: {
5000 APPEND_F("[%s%s] ",
5001 C(FL_TEST(obj, RSTRING_FSTR), "F"),
5002 C(RB_OBJ_FROZEN(obj), "R"));
5003
5004 if (STR_SHARED_P(obj)) {
5005 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
5006 }
5007 else {
5008 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
5009
5010 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
5011 }
5012 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
5013 break;
5014 }
5015 case T_SYMBOL: {
5016 VALUE fstr = RSYMBOL(obj)->fstr;
5017 ID id = RSYMBOL(obj)->id;
5018 if (RB_TYPE_P(fstr, T_STRING)) {
5019 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
5020 }
5021 else {
5022 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
5023 }
5024 break;
5025 }
5026 case T_MOVED: {
5027 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
5028 break;
5029 }
5030 case T_HASH: {
5031 APPEND_F("[%c] %"PRIdSIZE,
5032 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
5033 RHASH_SIZE(obj));
5034 break;
5035 }
5036 case T_CLASS:
5037 case T_MODULE:
5038 {
5039 VALUE class_path = rb_class_path_cached(obj);
5040 if (!NIL_P(class_path)) {
5041 APPEND_F("%s", RSTRING_PTR(class_path));
5042 }
5043 else {
5044 APPEND_S("(anon)");
5045 }
5046 break;
5047 }
5048 case T_ICLASS:
5049 {
5050 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
5051 if (!NIL_P(class_path)) {
5052 APPEND_F("src:%s", RSTRING_PTR(class_path));
5053 }
5054 break;
5055 }
5056 case T_OBJECT:
5057 {
5058 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
5059 if (rb_obj_shape_complex_p(obj)) {
5060 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
5061 APPEND_F("(complex) len:%zu", hash_len);
5062 }
5063 else {
5064 APPEND_F("len:%d capa:%d ptr:%p", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
5065 }
5066 }
5067 else {
5068 APPEND_F("(embed) len:%d capa:%d", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj));
5069 }
5070 }
5071 break;
5072 case T_DATA: {
5073 const struct rb_block *block;
5074 const rb_iseq_t *iseq;
5075 if (rb_obj_is_proc(obj) &&
5076 (block = vm_proc_block(obj)) != NULL &&
5077 (vm_block_type(block) == block_type_iseq) &&
5078 (iseq = vm_block_iseq(block)) != NULL) {
5079 rb_raw_iseq_info(BUFF_ARGS, iseq);
5080 }
5081 else if (rb_ractor_p(obj)) {
5082 rb_ractor_t *r = (void *)DATA_PTR(obj);
5083 if (r) {
5084 APPEND_F("r:%d", r->pub.id);
5085 }
5086 }
5087 break;
5088 }
5089 case T_IMEMO: {
5090 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
5091
5092 switch (imemo_type(obj)) {
5093 case imemo_ment:
5094 {
5095 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
5096
5097 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
5098 rb_id2name(me->called_id),
5099 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
5100 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
5101 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
5102 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
5103 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
5104 me->def ? rb_method_type_name(me->def->type) : "NULL",
5105 me->def ? me->def->aliased : -1,
5106 (void *)me->owner, // obj_info(me->owner),
5107 (void *)me->defined_class); //obj_info(me->defined_class)));
5108
5109 if (me->def) {
5110 switch (me->def->type) {
5111 case VM_METHOD_TYPE_ISEQ:
5112 APPEND_S(" (iseq:");
5113 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
5114 APPEND_S(")");
5115 break;
5116 default:
5117 break;
5118 }
5119 }
5120
5121 break;
5122 }
5123 case imemo_iseq: {
5124 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
5125 rb_raw_iseq_info(BUFF_ARGS, iseq);
5126 break;
5127 }
5128 case imemo_callinfo:
5129 {
5130 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
5131 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
5132 rb_id2name(vm_ci_mid(ci)),
5133 vm_ci_flag(ci),
5134 vm_ci_argc(ci),
5135 vm_ci_kwarg(ci) ? "available" : "NULL");
5136 break;
5137 }
5138 case imemo_callcache:
5139 {
5140 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
5141 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
5142 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
5143
5144 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
5145 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
5146 cme ? rb_id2name(cme->called_id) : "<NULL>",
5147 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
5148 (void *)cme,
5149 (void *)(uintptr_t)vm_cc_call(cc));
5150 break;
5151 }
5152 default:
5153 break;
5154 }
5155 }
5156 default:
5157 break;
5158 }
5159 }
5160 end:
5161
5162 return pos;
5163}
5164
5165#undef C
5166
5167#ifdef RUBY_ASAN_ENABLED
5168void
5169rb_asan_poison_object(VALUE obj)
5170{
5171 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5172 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
5173}
5174
5175void
5176rb_asan_unpoison_object(VALUE obj, bool newobj_p)
5177{
5178 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5179 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
5180}
5181
5182void *
5183rb_asan_poisoned_object_p(VALUE obj)
5184{
5185 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5186 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
5187}
5188#endif
5189
5190static void
5191raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5192{
5193 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
5194 pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
5195 if (pos >= buff_size) {} // truncated
5196}
5197
5198const char *
5199rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5200{
5201 void *objspace = rb_gc_get_objspace();
5202
5203 if (SPECIAL_CONST_P(obj)) {
5204 raw_obj_info(buff, buff_size, obj);
5205 }
5206 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
5207 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
5208 }
5209#if 0 // maybe no need to check it?
5210 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
5211 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
5212 }
5213#endif
5214 else {
5215 asan_unpoisoning_object(obj) {
5216 raw_obj_info(buff, buff_size, obj);
5217 }
5218 }
5219 return buff;
5220}
5221
5222#undef APPEND_S
5223#undef APPEND_F
5224#undef BUFF_ARGS
5225
5226/* Increments *var atomically and resets *var to 0 when maxval is
5227 * reached. Returns the old *var value, wrapped into the range (0...maxval). */
5228static rb_atomic_t
5229atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5230{
5231 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5232 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5233 const rb_atomic_t newval = oldval + 1;
5234 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5235 oldval %= maxval;
5236 }
5237 return oldval;
5238}
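/* Worked example: with maxval == 10 and *var == 9, FETCH_ADD leaves *var at
 * 10, the CAS resets it to 0, and the wrapped old value 9 is returned. */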
5239
5240static const char *
5241obj_info(VALUE obj)
5242{
5243 if (RGENGC_OBJ_INFO) {
5244 static struct {
5245 rb_atomic_t index;
5246 char buffers[10][0x100];
5247 } info = {0};
5248
5249 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5250 char *const buff = info.buffers[index];
5251 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5252 }
5253 return obj_type_name(obj);
5254}
5255
5256/*
5257 ------------------------ Extended allocator ------------------------
5258*/
5259
5260struct gc_raise_tag {
5261 VALUE exc;
5262 const char *fmt;
5263 va_list *ap;
5264};
5265
5266static void *
5267gc_vraise(void *ptr)
5268{
5269 struct gc_raise_tag *argv = ptr;
5270 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5271 UNREACHABLE_RETURN(NULL);
5272}
5273
5274static void
5275gc_raise(VALUE exc, const char *fmt, ...)
5276{
5277 va_list ap;
5278 va_start(ap, fmt);
5279 struct gc_raise_tag argv = {
5280 exc, fmt, &ap,
5281 };
5282
5283 if (ruby_native_thread_p()) {
5284 rb_thread_call_with_gvl(gc_vraise, &argv);
5286 }
5287 else {
5288 /* Not in a ruby thread */
5289 fprintf(stderr, "%s", "[FATAL] ");
5290 vfprintf(stderr, fmt, ap);
5291 }
5292
5293 va_end(ap);
5294 abort();
5295}
5296
5297NORETURN(static void negative_size_allocation_error(const char *));
5298static void
5299negative_size_allocation_error(const char *msg)
5300{
5301 gc_raise(rb_eNoMemError, "%s", msg);
5302}
5303
5304static void *
5305ruby_memerror_body(void *dummy)
5306{
5307 rb_memerror();
5308 return 0;
5309}
5310
5311NORETURN(static void ruby_memerror(void));
5313static void
5314ruby_memerror(void)
5315{
5316 if (ruby_thread_has_gvl_p()) {
5317 rb_memerror();
5318 }
5319 else {
5320 if (ruby_native_thread_p()) {
5321 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5322 }
5323 else {
5324 /* no ruby thread */
5325 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5326 }
5327 }
5328
5329 /* There have been discussions about whether we should die here; */
5330 /* we may revisit this later. */
5331 exit(EXIT_FAILURE);
5332}
5333
5334void
5335rb_memerror(void)
5336{
5337 /* the `GET_VM()->special_exceptions` below assumes that
5338 * the VM is reachable from the current thread. We should
5339 * definitely make sure of that. */
5340 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5341
5342 rb_execution_context_t *ec = GET_EC();
5343 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5344
5345 if (!exc ||
5346 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5347 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5348 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5349 exit(EXIT_FAILURE);
5350 }
5351 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5352 rb_ec_raised_clear(ec);
5353 }
5354 else {
5355 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5356 exc = ruby_vm_special_exception_copy(exc);
5357 }
5358 ec->errinfo = exc;
5359 EC_JUMP_TAG(ec, TAG_RAISE);
5360}
5361
5362bool
5363rb_memerror_reentered(void)
5364{
5365 rb_execution_context_t *ec = GET_EC();
5366 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5367}
5368
5369static void *
5370handle_malloc_failure(void *ptr)
5371{
5372 if (LIKELY(ptr)) {
5373 return ptr;
5374 }
5375 else {
5376 ruby_memerror();
5377 UNREACHABLE_RETURN(ptr);
5378 }
5379}
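/* Each ruby_x* allocator below follows the same split: a *_body helper that
 * returns NULL on failure, wrapped in handle_malloc_failure() so the public
 * entry point raises NoMemoryError instead of returning NULL. */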
5380
5381static void *ruby_xmalloc_body(size_t size);
5382
5383void *
5384ruby_xmalloc(size_t size)
5385{
5386 return handle_malloc_failure(ruby_xmalloc_body(size));
5387}
5388
5389static bool
5390malloc_gc_allowed(void)
5391{
5392 rb_ractor_t *r = rb_current_ractor_raw(false);
5393
5394 return r == NULL || !r->malloc_gc_disabled;
5395}
5396
5397static void *
5398ruby_xmalloc_body(size_t size)
5399{
5400 if ((ssize_t)size < 0) {
5401 negative_size_allocation_error("too large allocation size");
5402 }
5403
5404 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5405}
5406
5407void
5408ruby_malloc_size_overflow(size_t count, size_t elsize)
5409{
5410 rb_raise(rb_eArgError,
5411 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5412 count, elsize);
5413}
5414
5415void
5416ruby_malloc_add_size_overflow(size_t x, size_t y)
5417{
5418 rb_raise(rb_eArgError,
5419 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5420 x, y);
5421}
5422
5423static void *ruby_xmalloc2_body(size_t n, size_t size);
5424
5425void *
5426ruby_xmalloc2(size_t n, size_t size)
5427{
5428 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5429}
5430
5431static void *
5432ruby_xmalloc2_body(size_t n, size_t size)
5433{
5434 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5435}
5436
5437static void *ruby_xcalloc_body(size_t n, size_t size);
5438
5439void *
5440ruby_xcalloc(size_t n, size_t size)
5441{
5442 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5443}
5444
5445static void *
5446ruby_xcalloc_body(size_t n, size_t size)
5447{
5448 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5449}
5450
5451static void *ruby_xrealloc_sized_body(void *ptr, size_t new_size, size_t old_size);
5452
5453#ifdef ruby_xrealloc_sized
5454#undef ruby_xrealloc_sized
5455#endif
5456void *
5457ruby_xrealloc_sized(void *ptr, size_t new_size, size_t old_size)
5458{
5459 return handle_malloc_failure(ruby_xrealloc_sized_body(ptr, new_size, old_size));
5460}
5461
5462static void *
5463ruby_xrealloc_sized_body(void *ptr, size_t new_size, size_t old_size)
5464{
5465 if ((ssize_t)new_size < 0) {
5466 negative_size_allocation_error("too large allocation size");
5467 }
5468
5469 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5470}
5471
5472void *
5473ruby_xrealloc(void *ptr, size_t new_size)
5474{
5475 return ruby_xrealloc_sized(ptr, new_size, 0);
5476}
5477
5478static void *ruby_xrealloc2_sized_body(void *ptr, size_t n, size_t size, size_t old_n);
5479
5480#ifdef ruby_xrealloc2_sized
5481#undef ruby_xrealloc2_sized
5482#endif
5483void *
5484ruby_xrealloc2_sized(void *ptr, size_t n, size_t size, size_t old_n)
5485{
5486 return handle_malloc_failure(ruby_xrealloc2_sized_body(ptr, n, size, old_n));
5487}
5488
5489static void *
5490ruby_xrealloc2_sized_body(void *ptr, size_t n, size_t size, size_t old_n)
5491{
5492 size_t len = xmalloc2_size(n, size);
5493 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5494}
5495
5496void *
5497ruby_xrealloc2(void *ptr, size_t n, size_t size)
5498{
5499 return ruby_xrealloc2_sized(ptr, n, size, 0);
5500}
5501
5502#ifdef ruby_xfree_sized
5503#undef ruby_xfree_sized
5504#endif
5505void
5506ruby_xfree_sized(void *x, size_t size)
5507{
5508 if (LIKELY(x)) {
5509 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5510 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5511 * that case. */
5512 if (LIKELY(GET_VM())) {
5513 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5514 }
5515 else {
5516 ruby_mimfree(x);
5517 }
5518 }
5519}
5520
5521void
5522ruby_xfree(void *x)
5523{
5524 ruby_xfree_sized(x, 0);
5525}
5526
5527void *
5528rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5529{
5530 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5531 return ruby_xmalloc(w);
5532}
5533
5534void *
5535rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5536{
5537 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5538 return ruby_xcalloc(w, 1);
5539}
5540
5541void *
5542rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5543{
5544 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5545 return ruby_xrealloc((void *)p, w);
5546}
5547
5548void *
5549rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5550{
5551 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5552 return ruby_xmalloc(u);
5553}
5554
5555void *
5556rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5557{
5558 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5559 return ruby_xcalloc(u, 1);
5560}
5561
5562/* Mimics ruby_xmalloc, but does not require rb_objspace.
5563 * Should return a pointer suitable for ruby_xfree.
5564 */
5565void *
5566ruby_mimmalloc(size_t size)
5567{
5568 void *mem;
5569#if CALC_EXACT_MALLOC_SIZE
5570 size += sizeof(struct malloc_obj_info);
5571#endif
5572 mem = malloc(size);
5573#if CALC_EXACT_MALLOC_SIZE
5574 if (!mem) {
5575 return NULL;
5576 }
5577 else
5578 /* set 0 for consistency of allocated_size/allocations */
5579 {
5580 struct malloc_obj_info *info = mem;
5581 info->size = 0;
5582 mem = info + 1;
5583 }
5584#endif
5585 return mem;
5586}
5587
5588void *
5589ruby_mimcalloc(size_t num, size_t size)
5590{
5591 void *mem;
5592#if CALC_EXACT_MALLOC_SIZE
5593 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5594 if (UNLIKELY(t.overflowed)) {
5595 return NULL;
5596 }
5597 size = t.result + sizeof(struct malloc_obj_info);
5598 mem = calloc1(size);
5599 if (!mem) {
5600 return NULL;
5601 }
5602 else
5603 /* set 0 for consistency of allocated_size/allocations */
5604 {
5605 struct malloc_obj_info *info = mem;
5606 info->size = 0;
5607 mem = info + 1;
5608 }
5609#else
5610 mem = calloc(num, size);
5611#endif
5612 return mem;
5613}
5614
5615void
5616ruby_mimfree(void *ptr)
5617{
5618#if CALC_EXACT_MALLOC_SIZE
5619 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5620 ptr = info;
5621#endif
5622 free(ptr);
5623}
5624
5625void
5626rb_gc_adjust_memory_usage(ssize_t diff)
5627{
5628 unless_objspace(objspace) { return; }
5629
5630 rb_gc_impl_adjust_memory_usage(objspace, diff);
5631}
5632
5633const char *
5634rb_obj_info(VALUE obj)
5635{
5636 return obj_info(obj);
5637}
5638
5639void
5640rb_obj_info_dump(VALUE obj)
5641{
5642 char buff[0x100];
5643 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5644}
5645
5646void
5647rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5648{
5649 char buff[0x100];
5650 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5651}
5652
5653void
5654rb_gc_before_fork(void)
5655{
5656 rb_gc_impl_before_fork(rb_gc_get_objspace());
5657}
5658
5659void
5660rb_gc_after_fork(rb_pid_t pid)
5661{
5662 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5663}
5664
5665bool
5666rb_gc_obj_shareable_p(VALUE obj)
5667{
5668 return RB_OBJ_SHAREABLE_P(obj);
5669}
5670
5671void
5672rb_gc_rp(VALUE obj)
5673{
5674 rp(obj);
5675}
5676
5677struct check_shareable_data {
5678 VALUE parent;
5679 long err_count;
5680};
5681
5682static void
5683check_shareable_i(const VALUE child, void *ptr)
5684{
5685 struct check_shareable_data *data = (struct check_shareable_data *)ptr;
5686
5687 if (!rb_gc_obj_shareable_p(child)) {
5688 fprintf(stderr, "(a) ");
5689 rb_gc_rp(data->parent);
5690 fprintf(stderr, "(b) ");
5691 rb_gc_rp(child);
5692 fprintf(stderr, "check_shareable_i: shareable (a) -> unshareable (b)\n");
5693
5694 data->err_count++;
5695 rb_bug("!! violate shareable constraint !!");
5696 }
5697}
5698
5699static bool gc_checking_shareable = false;
5700
5701static void
5702gc_verify_shareable(void *objspace, VALUE obj, void *data)
5703{
5704 // While gc_checking_shareable is true, other Ractors must not run the GC;
5705 // this relies on the flag being process-global rather than Ractor-local.
5706 // TODO: remove the VM locking once the flag is Ractor-local.
5707
5708 unsigned int lev = RB_GC_VM_LOCK();
5709 {
5710 gc_checking_shareable = true;
5711 rb_objspace_reachable_objects_from(obj, check_shareable_i, (void *)data);
5712 gc_checking_shareable = false;
5713 }
5714 RB_GC_VM_UNLOCK(lev);
5715}
5716
5717// TODO: verifies only one level of reachability (non-recursive)
5718void
5719rb_gc_verify_shareable(VALUE obj)
5720{
5721 rb_objspace_t *objspace = rb_gc_get_objspace();
5722 struct check_shareable_data data = {
5723 .parent = obj,
5724 .err_count = 0,
5725 };
5726 gc_verify_shareable(objspace, obj, &data);
5727
5728 if (data.err_count > 0) {
5729 rb_bug("rb_gc_verify_shareable");
5730 }
5731}
5732
5733bool
5734rb_gc_checking_shareable(void)
5735{
5736 return gc_checking_shareable;
5737}
5738
5739/*
5740 * Document-module: ObjectSpace
5741 *
5742 * The ObjectSpace module contains a number of routines
5743 * that interact with the garbage collection facility and allow you to
5744 * traverse all living objects with an iterator.
5745 *
5746 * ObjectSpace also provides support for object finalizers, procs that will be
5747 * called after a specific object was destroyed by garbage collection. See
5748 * the documentation for +ObjectSpace.define_finalizer+ for important
5749 * information on how to use this method correctly.
5750 *
5751 * a = "A"
5752 * b = "B"
5753 *
5754 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5755 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5756 *
5757 * a = nil
5758 * b = nil
5759 *
5760 * _produces:_
5761 *
5762 * Finalizer two on 537763470
5763 * Finalizer one on 537763480
5764 */
5765
5766/* Document-class: GC::Profiler
5767 *
5768 * The GC profiler provides access to information on GC runs including time,
5769 * length and object space size.
5770 *
5771 * Example:
5772 *
5773 * GC::Profiler.enable
5774 *
5775 * require 'rdoc/rdoc'
5776 *
5777 * GC::Profiler.report
5778 *
5779 * GC::Profiler.disable
5780 *
5781 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5782 */
5783
5784#include "gc.rbinc"
5785
5786void
5787Init_GC(void)
5788{
5789#undef rb_intern
5790 rb_gc_register_address(&id2ref_value);
5791
5792 malloc_offset = gc_compute_malloc_offset();
5793
5794 rb_mGC = rb_define_module("GC");
5795
5796 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5797
5798 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5799
5800 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5801 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5802
5803 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5804
5805 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5806
5807 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5808 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5809
5810 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5811
5812 rb_gc_impl_init();
5813}
5814
5815// Set a name for the anonymous virtual memory area. `addr` is the starting
5816// address of the area and `size` is its length in bytes. `name` is a
5817// NUL-terminated human-readable string.
5818//
5819// This function is usually called after calling `mmap()`. The human-readable
5820// annotation helps developers identify the call site of `mmap()` that created
5821// the memory mapping.
5822//
5823// This function currently only works on Linux 5.17 or higher. After calling
5824// this function, we can see annotations in the form of "[anon:...]" in
5825// `/proc/self/maps`, where `...` is the content of `name`. This function has
5826// no effect when called on other platforms.
5827void
5828ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5829{
5830#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5831 // The name length cannot exceed 80 (including the '\0').
5832 RUBY_ASSERT(strlen(name) < 80);
5833 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5834 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5835 // reasons.
5836 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5837 // 2. addr is an invalid address.
5838 // 3. The string pointed by name is too long.
5839 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5840 // happen if we run the compiled binary on an old kernel. In theory, all
5841 // other errors should result in a failure. But since EINVAL cannot tell
5842 // the first error from others, and this function is mainly used for
5843 // debugging, we silently ignore the error.
5844 errno = 0;
5845#endif
5846}
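/* Typical use right after mapping a region (sketch; the label is
 * illustrative):
 *
 *   void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   if (mem != MAP_FAILED) ruby_annotate_mmap(mem, len, "Ruby:my_region");
 *
 * after which /proc/self/maps shows "[anon:Ruby:my_region]" on Linux 5.17+.
 */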
Definition rregexp.h:45
#define RSTRING(obj)
Convenient casting macro.
Definition rstring.h:41
static long RSTRUCT_LEN(VALUE st)
Returns the number of struct members.
Definition rstruct.h:82
static bool RTYPEDDATA_P(VALUE obj)
Checks whether the passed object is RTypedData or RData.
Definition rtypeddata.h:669
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:81
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
This is the primitive way to wrap an existing C struct into RTypedData.
Definition gc.c:1182
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
Identical to rb_data_typed_object_wrap(), except it allocates a new data region internally instead of...
Definition gc.c:1192
#define RUBY_TYPED_FREE_IMMEDIATELY
Macros to see if each corresponding flag is defined.
Definition rtypeddata.h:122
static const rb_data_type_t * RTYPEDDATA_TYPE(VALUE obj)
Queries for the type of given object.
Definition rtypeddata.h:687
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:531
#define RTYPEDDATA(obj)
Convenient casting macro.
Definition rtypeddata.h:96
const char * rb_obj_classname(VALUE obj)
Queries the name of the class of the passed object.
Definition variable.c:515
void rb_p(VALUE obj)
Inspects an object.
Definition io.c:9064
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5815
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
Defines old _.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
Ruby's array.
Definition rarray.h:128
Ruby object's base components.
Definition rbasic.h:69
Definition rdata.h:120
RUBY_DATA_FUNC dfree
This function is called when the object is no longer used.
Definition rdata.h:143
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rdata.h:134
void * data
Pointer to the actual C level struct that you want to wrap.
Definition rdata.h:149
Definition hash.h:53
Regular expression execution context.
Definition rmatch.h:79
union RMatch::@55 as
"Registers" of a match.
struct rmatch_offset * char_offset
Capture group offsets, in C array.
Definition rmatch.h:98
int char_offset_num_allocated
Number of rmatch_offset that ::rmatch::char_offset holds.
Definition rmatch.h:95
int num_regs
Number of capture-group registers.
Definition rmatch.h:101
Ruby's ordinal objects.
Definition robject.h:85
Ruby's String.
Definition rstring.h:196
"Typed" user data.
Definition rtypeddata.h:384
void * data
Pointer to the actual C level struct that you want to wrap.
Definition rtypeddata.h:404
VALUE fields_obj
Direct reference to the slots that holds instance variables, if any.
Definition rtypeddata.h:390
Definition method.h:63
Definition constant.h:33
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:229
struct rb_data_type_struct::@62 function
Function pointers.
RUBY_DATA_FUNC dfree
This function is called when the object is no longer used.
Definition rtypeddata.h:259
RUBY_DATA_FUNC dcompact
This function is called when the object is relocated.
Definition rtypeddata.h:280
const char * wrap_struct_name
Name of structs of this kind.
Definition rtypeddata.h:236
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rtypeddata.h:250
VALUE flags
Type-specific behavioural characteristics.
Definition rtypeddata.h:343
Definition gc_impl.h:15
Private header for the default GC and other GC implementations first introduced for [Feature #20470].
Definition gc.h:16
Ruby's IO, metadata and buffers.
Definition io.h:295
Definition method.h:55
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Represents the region of a capture group.
Definition rmatch.h:65
Definition st.h:79
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
intptr_t SIGNED_VALUE
A signed integer type that has the same width with VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376
ruby_value_type
C-level type of an object.
Definition value_type.h:113
@ RUBY_T_MASK
Bitmask of ruby_value_type.
Definition value_type.h:145