Ruby 4.1.0dev (2026-05-15 revision a8bcae043f931d9b79f1cb1fe2c021985d07b984)
default.c
1#include "ruby/internal/config.h"
2
3#include <signal.h>
4
5#ifndef _WIN32
6# include <sys/mman.h>
7# include <unistd.h>
8# ifdef HAVE_SYS_PRCTL_H
9# include <sys/prctl.h>
10# endif
11#endif
12
13#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
14/* LIST_HEAD conflicts with sys/queue.h on macOS */
15# include <sys/user.h>
16#endif
17
18#ifdef BUILDING_MODULAR_GC
19# define nlz_int64(x) ((x) == 0 ? 64 : (unsigned int)__builtin_clzll((unsigned long long)(x)))
20#else
21# include "internal/bits.h"
22#endif
23
24#include "ruby/ruby.h"
25#include "ruby/atomic.h"
26#include "ruby/debug.h"
27#include "ruby/thread.h"
28#include "ruby/util.h"
29#include "ruby/vm.h"
31#include "ccan/list/list.h"
32#include "darray.h"
33#include "gc/gc.h"
34#include "gc/gc_impl.h"
35
36#ifndef BUILDING_MODULAR_GC
37# include "probes.h"
38#endif
39
40#ifdef BUILDING_MODULAR_GC
41# define RB_DEBUG_COUNTER_INC(_name) ((void)0)
42# define RB_DEBUG_COUNTER_INC_IF(_name, cond) (!!(cond))
43#else
44# include "debug_counter.h"
45#endif
46
47#ifdef BUILDING_MODULAR_GC
48# define rb_asan_poison_object(obj) ((void)(obj))
49# define rb_asan_unpoison_object(obj, newobj_p) ((void)(obj), (void)(newobj_p))
50# define asan_unpoisoning_object(obj) if ((obj) || true)
51# define asan_poison_memory_region(ptr, size) ((void)(ptr), (void)(size))
52# define asan_unpoison_memory_region(ptr, size, malloc_p) ((void)(ptr), (void)(size), (void)(malloc_p))
53# define asan_unpoisoning_memory_region(ptr, size) if ((ptr) || (size) || true)
54
55# define VALGRIND_MAKE_MEM_DEFINED(ptr, size) ((void)(ptr), (void)(size))
56# define VALGRIND_MAKE_MEM_UNDEFINED(ptr, size) ((void)(ptr), (void)(size))
57#else
58# include "internal/sanitizers.h"
59#endif
60
61/* MALLOC_HEADERS_BEGIN */
62#ifndef HAVE_MALLOC_USABLE_SIZE
63# ifdef _WIN32
64# define HAVE_MALLOC_USABLE_SIZE
65# define malloc_usable_size(a) _msize(a)
66# elif defined HAVE_MALLOC_SIZE
67# define HAVE_MALLOC_USABLE_SIZE
68# define malloc_usable_size(a) malloc_size(a)
69# endif
70#endif
71
72#ifdef HAVE_MALLOC_USABLE_SIZE
73# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
74/* Alternative malloc header is included in ruby/missing.h */
75# elif defined(HAVE_MALLOC_H)
76# include <malloc.h>
77# elif defined(HAVE_MALLOC_NP_H)
78# include <malloc_np.h>
79# elif defined(HAVE_MALLOC_MALLOC_H)
80# include <malloc/malloc.h>
81# endif
82#endif
83
84#ifdef HAVE_MALLOC_TRIM
85# include <malloc.h>
86
87# ifdef __EMSCRIPTEN__
88/* malloc_trim is defined in emscripten/emmalloc.h on emscripten. */
89# include <emscripten/emmalloc.h>
90# endif
91#endif
92
93#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
94# include <mach/task.h>
95# include <mach/mach_init.h>
96# include <mach/mach_port.h>
97#endif
98
99#ifndef VM_CHECK_MODE
100# define VM_CHECK_MODE RUBY_DEBUG
101#endif
102
103// From ractor_core.h
104#ifndef RACTOR_CHECK_MODE
105# define RACTOR_CHECK_MODE ((VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE))
106#endif
107
108#ifndef RUBY_DEBUG_LOG
109# define RUBY_DEBUG_LOG(...)
110#endif
111
112#ifndef GC_HEAP_INIT_BYTES
113#define GC_HEAP_INIT_BYTES (2560 * 1024)
114#endif
115#ifndef GC_HEAP_FREE_SLOTS
116#define GC_HEAP_FREE_SLOTS 4096
117#endif
118#ifndef GC_HEAP_GROWTH_FACTOR
119#define GC_HEAP_GROWTH_FACTOR 1.8
120#endif
121#ifndef GC_HEAP_GROWTH_MAX_BYTES
122#define GC_HEAP_GROWTH_MAX_BYTES 0 /* 0 means disabled */
123#endif
124#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
125# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
126#endif
127#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
128#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
129#endif
130
131#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
132#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
133#endif
134#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
135#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
136#endif
137#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
138#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
139#endif
140
141#ifndef GC_MALLOC_LIMIT_MIN
142#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
143#endif
144#ifndef GC_MALLOC_LIMIT_MAX
145#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
146#endif
147#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
148#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
149#endif
150
151#ifndef GC_OLDMALLOC_LIMIT_MIN
152#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
153#endif
154#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
155#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
156#endif
157#ifndef GC_OLDMALLOC_LIMIT_MAX
158#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
159#endif
160
161#ifndef GC_MALLOC_INCREASE_LOCAL_THRESHOLD
162#define GC_MALLOC_INCREASE_LOCAL_THRESHOLD (8 * 1024 /* 8KB */)
163#endif
164
165#ifdef RB_THREAD_LOCAL_SPECIFIER
166#define USE_MALLOC_INCREASE_LOCAL 1
167static RB_THREAD_LOCAL_SPECIFIER int malloc_increase_local;
168#else
169#define USE_MALLOC_INCREASE_LOCAL 0
170#endif
171
172#ifndef GC_CAN_COMPILE_COMPACTION
173#if defined(__wasi__) /* WebAssembly doesn't support signals */
174# define GC_CAN_COMPILE_COMPACTION 0
175#else
176# define GC_CAN_COMPILE_COMPACTION 1
177#endif
178#endif
179
180#ifndef PRINT_ENTER_EXIT_TICK
181# define PRINT_ENTER_EXIT_TICK 0
182#endif
183#ifndef PRINT_ROOT_TICKS
184#define PRINT_ROOT_TICKS 0
185#endif
186
187#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_ROOT_TICKS)
188
189#ifndef HEAP_COUNT
190# if SIZEOF_VALUE >= 8
191# define HEAP_COUNT 12
192# else
193# define HEAP_COUNT 5
194# endif
195#endif
196
197/* The reciprocal table and pool_slot_sizes array are both generated from this
198 * single definition, so they can never get out of sync. */
199#if SIZEOF_VALUE >= 8
200# define EACH_POOL_SLOT_SIZE(SLOT) \
201 SLOT(32) SLOT(40) SLOT(64) SLOT(80) SLOT(96) SLOT(128) \
202 SLOT(160) SLOT(256) SLOT(512) SLOT(640) SLOT(768) SLOT(1024)
203#else
204# define EACH_POOL_SLOT_SIZE(SLOT) \
205 SLOT(32) SLOT(64) SLOT(128) SLOT(256) SLOT(512)
206#endif
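/* For example, applying `#define SLOT(s) s,` to the 64-bit list expands to
 * `32, 40, 64, 80, 96, 128, 160, 256, 512, 640, 768, 1024,`. The same
 * expansion stamps out both heap_slot_reciprocal_table[] and pool_slot_sizes[]
 * below, so HEAP_COUNT (12 on 64-bit) must match the number of SLOT() entries. */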
207
208/* Precomputed reciprocals for fast slot index calculation.
209 * For slot size d: reciprocal = ceil(2^48 / d).
210 * Then offset / d == (uint32_t)((offset * reciprocal) >> 48)
211 * for all offset < HEAP_PAGE_SIZE. */
212#define SLOT_RECIPROCAL_SHIFT 48
213#define SLOT_RECIPROCAL(size) (((1ULL << SLOT_RECIPROCAL_SHIFT) + (size) - 1) / (size))
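/* Worked example for slot size 40: SLOT_RECIPROCAL(40) = ceil(2^48 / 40) =
 * 7036874417767, so an offset of 80 maps to
 * (80 * 7036874417767) >> 48 = 2 == 80 / 40, with no division at runtime. */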
214
215static const uint64_t heap_slot_reciprocal_table[HEAP_COUNT] = {
216#define SLOT(size) SLOT_RECIPROCAL(size),
217 EACH_POOL_SLOT_SIZE(SLOT)
218#undef SLOT
219};
220typedef struct ractor_newobj_heap_cache {
221 struct free_slot *freelist;
222 struct heap_page *using_page;
223 size_t allocated_objects_count;
224} rb_ractor_newobj_heap_cache_t;
225
226typedef struct ractor_newobj_cache {
227 size_t incremental_mark_step_allocated_slots;
228 rb_ractor_newobj_heap_cache_t heap_caches[HEAP_COUNT];
229} rb_ractor_newobj_cache_t;
230
231typedef struct {
232 size_t heap_init_bytes;
233 size_t heap_free_slots;
234 double growth_factor;
235 size_t growth_max_bytes;
236
237 double heap_free_slots_min_ratio;
238 double heap_free_slots_goal_ratio;
239 double heap_free_slots_max_ratio;
240 double uncollectible_wb_unprotected_objects_limit_ratio;
241 double oldobject_limit_factor;
242
243 size_t malloc_limit_min;
244 size_t malloc_limit_max;
245 double malloc_limit_growth_factor;
246
247 size_t oldmalloc_limit_min;
248 size_t oldmalloc_limit_max;
249 double oldmalloc_limit_growth_factor;
250} ruby_gc_params_t;
251
252static ruby_gc_params_t gc_params = {
253 GC_HEAP_INIT_BYTES,
254 GC_HEAP_FREE_SLOTS,
255 GC_HEAP_GROWTH_FACTOR,
256 GC_HEAP_GROWTH_MAX_BYTES,
257
258 GC_HEAP_FREE_SLOTS_MIN_RATIO,
259 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
260 GC_HEAP_FREE_SLOTS_MAX_RATIO,
261 GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
262 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
263
264 GC_MALLOC_LIMIT_MIN,
265 GC_MALLOC_LIMIT_MAX,
266 GC_MALLOC_LIMIT_GROWTH_FACTOR,
267
268 GC_OLDMALLOC_LIMIT_MIN,
269 GC_OLDMALLOC_LIMIT_MAX,
270 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
271};
272
273/* GC_DEBUG:
274 * enable to embed GC debugging information.
275 */
276#ifndef GC_DEBUG
277#define GC_DEBUG 0
278#endif
279
280/* RGENGC_DEBUG:
281 * 1: basic information
282 * 2: remember set operation
283 * 3: mark
284 * 4:
285 * 5: sweep
286 */
287#ifndef RGENGC_DEBUG
288#ifdef RUBY_DEVEL
289#define RGENGC_DEBUG -1
290#else
291#define RGENGC_DEBUG 0
292#endif
293#endif
294#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
295# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
296#elif defined(HAVE_VA_ARGS_MACRO)
297# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
298#else
299# define RGENGC_DEBUG_ENABLED(level) 0
300#endif
301int ruby_rgengc_debug;
302
303/* RGENGC_PROFILE
304 * 0: disable RGenGC profiling
305 * 1: enable profiling for basic information
306 * 2: enable per-type profiling
307 */
308#ifndef RGENGC_PROFILE
309# define RGENGC_PROFILE 0
310#endif
311
312/* RGENGC_ESTIMATE_OLDMALLOC
313 * Enable/disable estimation of how much malloc'ed memory is held by old objects.
314 * If the estimate exceeds a threshold, a full GC is invoked.
315 * 0: disable estimation.
316 * 1: enable estimation.
317 */
318#ifndef RGENGC_ESTIMATE_OLDMALLOC
319# define RGENGC_ESTIMATE_OLDMALLOC 1
320#endif
321
322#ifndef GC_PROFILE_MORE_DETAIL
323# define GC_PROFILE_MORE_DETAIL 0
324#endif
325#ifndef GC_PROFILE_DETAIL_MEMORY
326# define GC_PROFILE_DETAIL_MEMORY 0
327#endif
328#ifndef GC_ENABLE_LAZY_SWEEP
329# define GC_ENABLE_LAZY_SWEEP 1
330#endif
331
332#ifndef VERIFY_FREE_SIZE
333#if RUBY_DEBUG
334#define VERIFY_FREE_SIZE 1
335#else
336#define VERIFY_FREE_SIZE 0
337#endif
338#endif
339
340#if VERIFY_FREE_SIZE
341#undef CALC_EXACT_MALLOC_SIZE
342#define CALC_EXACT_MALLOC_SIZE 1
343#endif
344
345#ifndef CALC_EXACT_MALLOC_SIZE
346# define CALC_EXACT_MALLOC_SIZE 0
347#endif
348
349#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
350# ifndef MALLOC_ALLOCATED_SIZE
351# define MALLOC_ALLOCATED_SIZE 0
352# endif
353#else
354# define MALLOC_ALLOCATED_SIZE 0
355#endif
356#ifndef MALLOC_ALLOCATED_SIZE_CHECK
357# define MALLOC_ALLOCATED_SIZE_CHECK 0
358#endif
359
360#ifndef GC_DEBUG_STRESS_TO_CLASS
361# define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
362#endif
363
364typedef enum {
365 GPR_FLAG_NONE = 0x000,
366 /* major reason */
367 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
368 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
369 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
370 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
371#if RGENGC_ESTIMATE_OLDMALLOC
372 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
373#endif
374 GPR_FLAG_MAJOR_MASK = 0x0ff,
375
376 /* gc reason */
377 GPR_FLAG_NEWOBJ = 0x100,
378 GPR_FLAG_MALLOC = 0x200,
379 GPR_FLAG_METHOD = 0x400,
380 GPR_FLAG_CAPI = 0x800,
381 GPR_FLAG_STRESS = 0x1000,
382
383 /* others */
384 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
385 GPR_FLAG_HAVE_FINALIZE = 0x4000,
386 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
387 GPR_FLAG_FULL_MARK = 0x10000,
388 GPR_FLAG_COMPACT = 0x20000,
389
390 GPR_DEFAULT_REASON =
391 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
392 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
393} gc_profile_record_flag;
394
395typedef struct gc_profile_record {
396 unsigned int flags;
397
398 double gc_time;
399 double gc_invoke_time;
400
401 size_t heap_total_objects;
402 size_t heap_use_size;
403 size_t heap_total_size;
404 size_t moved_objects;
405
406#if GC_PROFILE_MORE_DETAIL
407 double gc_mark_time;
408 double gc_sweep_time;
409
410 size_t heap_use_pages;
411 size_t heap_live_objects;
412 size_t heap_free_objects;
413
414 size_t allocate_increase;
415 size_t allocate_limit;
416
417 double prepare_time;
418 size_t removing_objects;
419 size_t empty_objects;
420#if GC_PROFILE_DETAIL_MEMORY
421 long maxrss;
422 long minflt;
423 long majflt;
424#endif
425#endif
426#if MALLOC_ALLOCATED_SIZE
427 size_t allocated_size;
428#endif
429
430#if RGENGC_PROFILE > 0
431 size_t old_objects;
432 size_t remembered_normal_objects;
433 size_t remembered_shady_objects;
434#endif
435} gc_profile_record;
436
437struct RMoved {
438 VALUE flags;
439 VALUE dummy;
440 VALUE destination;
441};
442
443#define RMOVED(obj) ((struct RMoved *)(obj))
444
445typedef uintptr_t bits_t;
446enum {
447 BITS_SIZE = sizeof(bits_t),
448 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
449};
450
451struct heap_page_header {
452 struct heap_page *page;
453};
454
455struct heap_page_body {
456 struct heap_page_header header;
457 /* char gap[]; */
458 /* RVALUE values[]; */
459};
460
461#define STACK_CHUNK_SIZE 500
462
463typedef struct stack_chunk {
464 VALUE data[STACK_CHUNK_SIZE];
465 struct stack_chunk *next;
466} stack_chunk_t;
467
468typedef struct mark_stack {
469 stack_chunk_t *chunk;
470 stack_chunk_t *cache;
471 int index;
472 int limit;
473 size_t cache_size;
474 size_t unused_cache_size;
475} mark_stack_t;
476
477typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
478
479typedef struct rb_heap_struct {
480 short slot_size;
481
482 /* Basic statistics */
483 size_t total_allocated_pages;
484 size_t force_major_gc_count;
485 size_t force_incremental_marking_finish_count;
486 size_t total_allocated_objects;
487 size_t total_freed_objects;
488 size_t final_slots_count;
489
490 /* Sweeping statistics */
491 size_t freed_slots;
492 size_t empty_slots;
493
494 struct heap_page *free_pages;
495 struct ccan_list_head pages;
496 struct heap_page *sweeping_page; /* iterator for .pages */
497 struct heap_page *compact_cursor;
498 uintptr_t compact_cursor_index;
499 struct heap_page *pooled_pages;
500 size_t total_pages; /* total page count in a heap */
501 size_t total_slots; /* total slot count */
502
503} rb_heap_t;
504
505enum {
506 gc_stress_no_major,
507 gc_stress_no_immediate_sweep,
508 gc_stress_full_mark_after_malloc,
509 gc_stress_max
510};
511
512enum gc_mode {
513 gc_mode_none,
514 gc_mode_marking,
515 gc_mode_sweeping,
516 gc_mode_compacting,
517};
518
519typedef struct rb_objspace {
520 struct {
521 size_t increase;
522#if RGENGC_ESTIMATE_OLDMALLOC
523 size_t oldmalloc_increase;
524#endif
525 } malloc_counters;
526
527 struct {
528 size_t limit;
529#if MALLOC_ALLOCATED_SIZE
530 size_t allocated_size;
531 size_t allocations;
532#endif
533 } malloc_params;
534
535 struct {
536 bool full_mark;
537 } gc_config;
538
539 struct {
540 unsigned int mode : 2;
541 unsigned int immediate_sweep : 1;
542 unsigned int dont_gc : 1;
543 unsigned int dont_incremental : 1;
544 unsigned int during_gc : 1;
545 unsigned int during_compacting : 1;
546 unsigned int during_reference_updating : 1;
547 unsigned int gc_stressful: 1;
548 unsigned int during_minor_gc : 1;
549 unsigned int during_incremental_marking : 1;
550 unsigned int measure_gc : 1;
551 } flags;
552
553 rb_event_flag_t hook_events;
554
555 rb_heap_t heaps[HEAP_COUNT];
556 size_t empty_pages_count;
557 struct heap_page *empty_pages;
558
559 struct {
560 rb_atomic_t finalizing;
561 } atomic_flags;
562
563 mark_stack_t mark_stack;
564 size_t marked_slots;
565
566 struct {
567 rb_darray(struct heap_page *) sorted;
568
569 size_t allocated_pages;
570 size_t freed_pages;
571 uintptr_t range[2];
572 size_t freeable_pages;
573
574 size_t allocatable_bytes;
575
576 /* final */
577 VALUE deferred_final;
578 } heap_pages;
579
580 st_table *finalizer_table;
581
582 struct {
583 int run;
584 unsigned int latest_gc_info;
585 gc_profile_record *records;
586 gc_profile_record *current_record;
587 size_t next_index;
588 size_t size;
589
590#if GC_PROFILE_MORE_DETAIL
591 double prepare_time;
592#endif
593 double invoke_time;
594
595 size_t minor_gc_count;
596 size_t major_gc_count;
597 size_t compact_count;
598 size_t read_barrier_faults;
599#if RGENGC_PROFILE > 0
600 size_t total_generated_normal_object_count;
601 size_t total_generated_shady_object_count;
602 size_t total_shade_operation_count;
603 size_t total_promoted_count;
604 size_t total_remembered_normal_object_count;
605 size_t total_remembered_shady_object_count;
606
607#if RGENGC_PROFILE >= 2
608 size_t generated_normal_object_count_types[RUBY_T_MASK];
609 size_t generated_shady_object_count_types[RUBY_T_MASK];
610 size_t shade_operation_count_types[RUBY_T_MASK];
611 size_t promoted_types[RUBY_T_MASK];
612 size_t remembered_normal_object_count_types[RUBY_T_MASK];
613 size_t remembered_shady_object_count_types[RUBY_T_MASK];
614#endif
615#endif /* RGENGC_PROFILE */
616
617 /* temporary profiling space */
618 double gc_sweep_start_time;
619 size_t total_allocated_objects_at_gc_start;
620 size_t heap_used_at_gc_start;
621 size_t heap_total_slots_at_gc_start;
622
623 /* basic statistics */
624 size_t count;
625 unsigned long long marking_time_ns;
626 struct timespec marking_start_time;
627 unsigned long long sweeping_time_ns;
628 struct timespec sweeping_start_time;
629
630 /* Weak references */
631 size_t weak_references_count;
632 } profile;
633
634 VALUE gc_stress_mode;
635
636 struct {
637 bool parent_object_old_p;
638 VALUE parent_object;
639
640 int need_major_gc;
641 size_t last_major_gc;
642 size_t uncollectible_wb_unprotected_objects;
643 size_t uncollectible_wb_unprotected_objects_limit;
644 size_t old_objects;
645 size_t old_objects_limit;
646
647#if RGENGC_ESTIMATE_OLDMALLOC
648 size_t oldmalloc_increase_limit;
649#endif
650
651#if RGENGC_CHECK_MODE >= 2
652 struct st_table *allrefs_table;
653 size_t error_count;
654#endif
655 } rgengc;
656
657 struct {
658 size_t considered_count_table[T_MASK];
659 size_t moved_count_table[T_MASK];
660 size_t moved_up_count_table[T_MASK];
661 size_t moved_down_count_table[T_MASK];
662 size_t total_moved;
663
664 /* This function will be used, if set, to sort the heap prior to compaction */
665 gc_compact_compare_func compare_func;
666 } rcompactor;
667
668 struct {
669 size_t pooled_slots;
670 size_t step_slots;
671 } rincgc;
672
673#if GC_DEBUG_STRESS_TO_CLASS
674 VALUE stress_to_class;
675#endif
676
677 rb_darray(VALUE) weak_references;
678 rb_postponed_job_handle_t finalize_deferred_pjob;
679
680 unsigned long live_ractor_cache_count;
681
682 int sweeping_heap_count;
683
684 int fork_vm_lock_lev;
685
686 struct rb_gc_vm_context vm_context;
687} rb_objspace_t;
688
689#ifndef HEAP_PAGE_ALIGN_LOG
690/* default tiny heap size: 64KiB */
691#define HEAP_PAGE_ALIGN_LOG 16
692#endif
693
694#if RACTOR_CHECK_MODE || GC_DEBUG
695struct rvalue_overhead {
696# if RACTOR_CHECK_MODE
697 uint32_t _ractor_belonging_id;
698# endif
699# if GC_DEBUG
700 const char *file;
701 int line;
702# endif
703};
704
705// Make sure that RVALUE_OVERHEAD aligns to sizeof(VALUE)
706# define RVALUE_OVERHEAD (sizeof(struct { \
707 union { \
708 struct rvalue_overhead overhead; \
709 VALUE value; \
710 }; \
711}))
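/* e.g. with only RACTOR_CHECK_MODE enabled, struct rvalue_overhead is 4
 * bytes, but the anonymous union with a VALUE pads RVALUE_OVERHEAD up to a
 * multiple of sizeof(VALUE), keeping slot sizes VALUE-aligned. */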
712size_t rb_gc_impl_obj_slot_size(VALUE obj);
713# define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_impl_obj_slot_size(obj)))
714#else
715# ifndef RVALUE_OVERHEAD
716# define RVALUE_OVERHEAD 0
717# endif
718#endif
719
720#define RVALUE_SLOT_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]) + RVALUE_OVERHEAD)
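/* On 64-bit builds with RVALUE_OVERHEAD == 0 this is 16 (struct RBasic) +
 * 24 (sizeof(VALUE[3]), where RBIMPL_RVALUE_EMBED_LEN_MAX is 3) = 40 bytes,
 * the base slot size assumed by the incremental sweep budget below. */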
721
722static const size_t pool_slot_sizes[HEAP_COUNT] = {
723#define SLOT(size) size,
724 EACH_POOL_SLOT_SIZE(SLOT)
725#undef SLOT
726};
727
728
729#if SIZEOF_VALUE >= 8
730static uint8_t size_to_heap_idx[1024 / 8 + 1];
731#else
732static uint8_t size_to_heap_idx[512 / 8 + 1];
733#endif
734
735#ifndef MAX
736# define MAX(a, b) (((a) > (b)) ? (a) : (b))
737#endif
738#ifndef MIN
739# define MIN(a, b) (((a) < (b)) ? (a) : (b))
740#endif
741#define roomof(x, y) (((x) + (y) - 1) / (y))
742#define CEILDIV(i, mod) roomof(i, mod)
743#define MIN_POOL_SLOT_SIZE 32
744enum {
745 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
746 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
747 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
748 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, MIN_POOL_SLOT_SIZE), BITS_BITLENGTH),
749 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
750};
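/* With the defaults (64 KiB pages, 32-byte minimum slots, 64-bit words):
 * HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(65536, 32), 64) = 32 words, so
 * each per-page bitmap occupies 32 * 8 = 256 bytes. */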
753
754#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
755# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
756#endif
757
758#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
759/* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
760 * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
761
762#ifndef HAVE_MMAP
763/* We can't use mmap if it is not available. */
764static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
765
766#elif defined(__wasm__)
767/* wasmtime does not have proper support for mmap.
768 * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
769 */
770static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
771
772#elif HAVE_CONST_PAGE_SIZE
773/* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
774static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
775
776#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
777/* If we can use the maximum page size. */
778static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
779
780#elif defined(PAGE_SIZE)
781/* If the PAGE_SIZE macro can be used dynamically. */
782# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
783
784#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
785/* If we can use sysconf to determine the page size. */
786# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
787
788#else
789/* Otherwise we can't determine the system page size, so don't use mmap. */
790static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
791#endif
792
793#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
794/* We can determine the system page size at runtime. */
795# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
796
797static bool heap_page_alloc_use_mmap;
798#endif
799
800#define RVALUE_AGE_BIT_COUNT 2
801#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
802#define RVALUE_OLD_AGE 3
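/* Two age bits encode ages 0..RVALUE_OLD_AGE; an object is promoted to the
 * old generation once its age reaches RVALUE_OLD_AGE == 3 (see
 * RVALUE_AGE_INC below). */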
803
804struct free_slot {
805 VALUE flags; /* always 0 for freed obj */
806 struct free_slot *next;
807};
808
809struct heap_page {
810 /* Cache line 0: allocation fast path + SLOT_INDEX */
811 struct free_slot *freelist;
812 uintptr_t start;
813 uint64_t slot_size_reciprocal;
814 unsigned short slot_size;
815 unsigned short total_slots;
816 unsigned short free_slots;
817 unsigned short final_slots;
818 unsigned short pinned_slots;
819 struct {
820 unsigned int before_sweep : 1;
821 unsigned int has_remembered_objects : 1;
822 unsigned int has_uncollectible_wb_unprotected_objects : 1;
823 } flags;
824
825 rb_heap_t *heap;
826
827 struct heap_page *free_next;
828 struct heap_page_body *body;
829 struct ccan_list_node page_node;
830
831 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
832 /* the following three bitmaps are cleared at the beginning of full GC */
833 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
834 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
835 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
836
837 bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
838
839 /* If set, the object is not movable */
840 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
841 bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
842};
843
844/*
845 * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
846 */
847static void
848asan_lock_freelist(struct heap_page *page)
849{
850 asan_poison_memory_region(&page->freelist, sizeof(struct free_slot *));
851}
852
853/*
854 * When asan is enabled, this re-enables writing to the freelist
855 */
856static void
857asan_unlock_freelist(struct heap_page *page)
858{
859 asan_unpoison_memory_region(&page->freelist, sizeof(struct free_slot *), false);
860}
861
862static inline bool
863heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page *page)
864{
865 if (page->total_slots == 0) {
866 GC_ASSERT(page->start == 0);
867 GC_ASSERT(page->slot_size == 0);
868 GC_ASSERT(page->heap == NULL);
869 GC_ASSERT(page->free_slots == 0);
870 asan_unpoisoning_memory_region(&page->freelist, sizeof(&page->freelist)) {
871 GC_ASSERT(page->freelist == NULL);
872 }
873
874 return true;
875 }
876 else {
877 GC_ASSERT(page->start != 0);
878 GC_ASSERT(page->slot_size != 0);
879 GC_ASSERT(page->heap != NULL);
880
881 return false;
882 }
883}
884
885#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
886#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
887#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
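/* Page bodies are HEAP_PAGE_ALIGN-aligned, so masking off the low
 * HEAP_PAGE_ALIGN_LOG bits of any interior pointer recovers the
 * heap_page_body that contains it. */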
888
889static inline size_t
890slot_index_for_offset(size_t offset, uint64_t reciprocal)
891{
892 return (uint32_t)(((uint64_t)offset * reciprocal) >> SLOT_RECIPROCAL_SHIFT);
893}
894
895#define SLOT_INDEX(page, p) slot_index_for_offset((uintptr_t)(p) - (page)->start, (page)->slot_size_reciprocal)
896#define SLOT_BITMAP_INDEX(page, p) (SLOT_INDEX(page, p) / BITS_BITLENGTH)
897#define SLOT_BITMAP_OFFSET(page, p) (SLOT_INDEX(page, p) & (BITS_BITLENGTH - 1))
898#define SLOT_BITMAP_BIT(page, p) ((bits_t)1 << SLOT_BITMAP_OFFSET(page, p))
899
900#define _MARKED_IN_BITMAP(bits, page, p) ((bits)[SLOT_BITMAP_INDEX(page, p)] & SLOT_BITMAP_BIT(page, p))
901#define _MARK_IN_BITMAP(bits, page, p) ((bits)[SLOT_BITMAP_INDEX(page, p)] |= SLOT_BITMAP_BIT(page, p))
902#define _CLEAR_IN_BITMAP(bits, page, p) ((bits)[SLOT_BITMAP_INDEX(page, p)] &= ~SLOT_BITMAP_BIT(page, p))
903
904#define MARKED_IN_BITMAP(bits, p) _MARKED_IN_BITMAP(bits, GET_HEAP_PAGE(p), p)
905#define MARK_IN_BITMAP(bits, p) _MARK_IN_BITMAP(bits, GET_HEAP_PAGE(p), p)
906#define CLEAR_IN_BITMAP(bits, p) _CLEAR_IN_BITMAP(bits, GET_HEAP_PAGE(p), p)
907
908#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
909#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
910#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
911#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
912#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
913
914static int
915RVALUE_AGE_GET(VALUE obj)
916{
917 struct heap_page *page = GET_HEAP_PAGE(obj);
918 bits_t *age_bits = page->age_bits;
919 size_t slot_idx = SLOT_INDEX(page, obj);
920 size_t idx = (slot_idx / BITS_BITLENGTH) * 2;
921 int shift = (int)(slot_idx & (BITS_BITLENGTH - 1));
922 int lo = (age_bits[idx] >> shift) & 1;
923 int hi = (age_bits[idx + 1] >> shift) & 1;
924 return lo | (hi << 1);
925}
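/* The ages are stored as two interleaved bit planes: age_bits[2k] holds the
 * low age bit and age_bits[2k + 1] the high age bit for slots k * 64 ..
 * k * 64 + 63 (on 64-bit). e.g. slot index 70 gives idx = 2, shift = 6. */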
926
927static void
928RVALUE_AGE_SET_BITMAP(VALUE obj, int age)
929{
930 RUBY_ASSERT(age <= RVALUE_OLD_AGE);
931 struct heap_page *page = GET_HEAP_PAGE(obj);
932 bits_t *age_bits = page->age_bits;
933 size_t slot_idx = SLOT_INDEX(page, obj);
934 size_t idx = (slot_idx / BITS_BITLENGTH) * 2;
935 int shift = (int)(slot_idx & (BITS_BITLENGTH - 1));
936 bits_t mask = (bits_t)1 << shift;
937
938 age_bits[idx] = (age_bits[idx] & ~mask) | ((bits_t)(age & 1) << shift);
939 age_bits[idx + 1] = (age_bits[idx + 1] & ~mask) | ((bits_t)((age >> 1) & 1) << shift);
940}
941
942static void
943RVALUE_AGE_SET(VALUE obj, int age)
944{
945 RVALUE_AGE_SET_BITMAP(obj, age);
946 if (age == RVALUE_OLD_AGE) {
947 RB_FL_SET_RAW(obj, RUBY_FL_PROMOTED);
948 }
949 else {
950 RB_FL_UNSET_RAW(obj, RUBY_FL_PROMOTED);
951 }
952}
953
954#define malloc_limit objspace->malloc_params.limit
955#define malloc_increase objspace->malloc_counters.increase
956#define malloc_allocated_size objspace->malloc_params.allocated_size
957#define heap_pages_lomem objspace->heap_pages.range[0]
958#define heap_pages_himem objspace->heap_pages.range[1]
959#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
960#define heap_pages_deferred_final objspace->heap_pages.deferred_final
961#define heaps objspace->heaps
962#define during_gc objspace->flags.during_gc
963#define finalizing objspace->atomic_flags.finalizing
964#define finalizer_table objspace->finalizer_table
965#define ruby_gc_stressful objspace->flags.gc_stressful
966#define ruby_gc_stress_mode objspace->gc_stress_mode
967#if GC_DEBUG_STRESS_TO_CLASS
968#define stress_to_class objspace->stress_to_class
969#define set_stress_to_class(c) (stress_to_class = (c))
970#else
971#define stress_to_class ((void)objspace, 0)
972#define set_stress_to_class(c) ((void)objspace, (c))
973#endif
974
975#if 0
976#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
977#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
978#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = (int)(b))
979#define dont_gc_val() (objspace->flags.dont_gc)
980#else
981#define dont_gc_on() (objspace->flags.dont_gc = 1)
982#define dont_gc_off() (objspace->flags.dont_gc = 0)
983#define dont_gc_set(b) (objspace->flags.dont_gc = (int)(b))
984#define dont_gc_val() (objspace->flags.dont_gc)
985#endif
986
987#define gc_config_full_mark_set(b) (objspace->gc_config.full_mark = (int)(b))
988#define gc_config_full_mark_val (objspace->gc_config.full_mark)
989
990#ifndef DURING_GC_COULD_MALLOC_REGION_START
991# define DURING_GC_COULD_MALLOC_REGION_START() \
992 assert(rb_during_gc()); \
993 bool _prev_enabled = rb_gc_impl_gc_enabled_p(objspace); \
994 rb_gc_impl_gc_disable(objspace, false)
995#endif
996
997#ifndef DURING_GC_COULD_MALLOC_REGION_END
998# define DURING_GC_COULD_MALLOC_REGION_END() \
999 if (_prev_enabled) rb_gc_impl_gc_enable(objspace)
1000#endif
1001
1002static inline enum gc_mode
1003gc_mode_verify(enum gc_mode mode)
1004{
1005#if RGENGC_CHECK_MODE > 0
1006 switch (mode) {
1007 case gc_mode_none:
1008 case gc_mode_marking:
1009 case gc_mode_sweeping:
1010 case gc_mode_compacting:
1011 break;
1012 default:
1013 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
1014 }
1015#endif
1016 return mode;
1017}
1018
1019static inline bool
1020has_sweeping_pages(rb_objspace_t *objspace)
1021{
1022 return objspace->sweeping_heap_count != 0;
1023}
1024
1025static inline size_t
1026heap_eden_total_pages(rb_objspace_t *objspace)
1027{
1028 size_t count = 0;
1029 for (int i = 0; i < HEAP_COUNT; i++) {
1030 count += (&heaps[i])->total_pages;
1031 }
1032 return count;
1033}
1034
1035static inline size_t
1036total_allocated_objects(rb_objspace_t *objspace)
1037{
1038 size_t count = 0;
1039 for (int i = 0; i < HEAP_COUNT; i++) {
1040 rb_heap_t *heap = &heaps[i];
1041 count += heap->total_allocated_objects;
1042 }
1043 return count;
1044}
1045
1046static inline size_t
1047total_freed_objects(rb_objspace_t *objspace)
1048{
1049 size_t count = 0;
1050 for (int i = 0; i < HEAP_COUNT; i++) {
1051 rb_heap_t *heap = &heaps[i];
1052 count += heap->total_freed_objects;
1053 }
1054 return count;
1055}
1056
1057static inline size_t
1058total_final_slots_count(rb_objspace_t *objspace)
1059{
1060 size_t count = 0;
1061 for (int i = 0; i < HEAP_COUNT; i++) {
1062 rb_heap_t *heap = &heaps[i];
1063 count += heap->final_slots_count;
1064 }
1065 return count;
1066}
1067
1068#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1069#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
1070#define gc_needs_major_flags objspace->rgengc.need_major_gc
1071
1072#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1073#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1074#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1075#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1076#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1077/*
1078 * Byte budget for incremental sweep steps. Each step sweeps at most
1079 * this many bytes worth of slots before yielding. The effective slot
1080 * count per step is GC_INCREMENTAL_SWEEP_BYTES / heap->slot_size,
1081 * so larger slot pools (which are less heavily used) naturally get
1082 * fewer slots swept per step.
1083 *
1084 * Baseline: 2048 slots * RVALUE_SLOT_SIZE = 2048 * 40 = 81920 bytes,
1085 * preserving the historical behavior for the smallest heap.
1086 */
1087#define GC_INCREMENTAL_SWEEP_BYTES (2048 * RVALUE_SLOT_SIZE)
1088#define GC_INCREMENTAL_SWEEP_POOL_BYTES (1024 * RVALUE_SLOT_SIZE)
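/* e.g. an 81920-byte budget sweeps 81920 / 40 = 2048 slots per step in the
 * 40-byte pool but only 81920 / 640 = 128 slots per step in the 640-byte
 * pool. */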
1089#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1090/* Lazy sweeping is in progress, or the previous incremental marking finished without yielding a free page. */
1091#define needs_continue_sweeping(objspace, heap) \
1092 ((heap)->free_pages == NULL && is_lazy_sweeping(objspace))
1093
1094#if SIZEOF_LONG == SIZEOF_VOIDP
1095# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1096#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1097# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1098 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1099#else
1100# error not supported
1101#endif
1102
1103struct RZombie {
1104 VALUE flags;
1105 VALUE next;
1106 void (*dfree)(void *);
1107 void *data;
1108};
1109
1110#define RZOMBIE(o) ((struct RZombie *)(o))
1111
1112static bool ruby_enable_autocompact = false;
1113#if RGENGC_CHECK_MODE
1114static gc_compact_compare_func ruby_autocompact_compare_func;
1115#endif
1116
1117static void init_mark_stack(mark_stack_t *stack);
1118static int garbage_collect(rb_objspace_t *, unsigned int reason);
1119
1120static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1121static void gc_rest(rb_objspace_t *objspace);
1122
1123enum gc_enter_event {
1124 gc_enter_event_start,
1125 gc_enter_event_continue,
1126 gc_enter_event_rest,
1127 gc_enter_event_finalizer,
1128};
1129
1130static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1131static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1132static void gc_marking_enter(rb_objspace_t *objspace);
1133static void gc_marking_exit(rb_objspace_t *objspace);
1134static void gc_sweeping_enter(rb_objspace_t *objspace);
1135static void gc_sweeping_exit(rb_objspace_t *objspace);
1136static bool gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1137
1138static void gc_sweep(rb_objspace_t *objspace);
1139static void gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap);
1140static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1141
1142static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1143static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1144static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1145
1146static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1147NO_SANITIZE("memory", static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr));
1148
1149static void gc_verify_internal_consistency(void *objspace_ptr);
1150
1151static double getrusage_time(void);
1152static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1153static inline void gc_prof_timer_start(rb_objspace_t *);
1154static inline void gc_prof_timer_stop(rb_objspace_t *);
1155static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1156static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1157static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1158static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1159static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1160static inline void gc_prof_set_heap_info(rb_objspace_t *);
1161
1162#define gc_prof_record(objspace) (objspace)->profile.current_record
1163#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1164
1165#ifdef HAVE_VA_ARGS_MACRO
1166# define gc_report(level, objspace, ...) \
1167 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1168#else
1169# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1170#endif
1171PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1172
1173static void gc_finalize_deferred(void *dmy);
1174
1175#if USE_TICK_T
1176
1177/* the following code is only for internal tuning. */
1178
1179/* Source code to use RDTSC is quoted and modified from
1180 * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1181 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1182 */
1183
1184#if defined(__GNUC__) && defined(__i386__)
1185typedef unsigned long long tick_t;
1186#define PRItick "llu"
1187static inline tick_t
1188tick(void)
1189{
1190 unsigned long long int x;
1191 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1192 return x;
1193}
1194
1195#elif defined(__GNUC__) && defined(__x86_64__)
1196typedef unsigned long long tick_t;
1197#define PRItick "llu"
1198
1199static __inline__ tick_t
1200tick(void)
1201{
1202 unsigned long hi, lo;
1203 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1204 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1205}
1206
1207#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1208typedef unsigned long long tick_t;
1209#define PRItick "llu"
1210
1211static __inline__ tick_t
1212tick(void)
1213{
1214 unsigned long long val = __builtin_ppc_get_timebase();
1215 return val;
1216}
1217
1218#elif defined(__POWERPC__) && defined(__APPLE__)
1219/* Implementation for macOS PPC by @nobu
1220 * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1221 */
1222typedef unsigned long long tick_t;
1223#define PRItick "llu"
1224
1225static __inline__ tick_t
1226tick(void)
1227{
1228 unsigned long int upper, lower, tmp;
1229 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1230 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
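 /* The 64-bit timebase is read as two 32-bit halves; retry whenever the
 upper half changed between the reads, i.e. the lower half wrapped. */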
1231 do {
1232 mftbu(upper);
1233 mftb(lower);
1234 mftbu(tmp);
1235 } while (tmp != upper);
1236 return ((tick_t)upper << 32) | lower;
1237}
1238
1239#elif defined(__aarch64__) && defined(__GNUC__)
1240typedef unsigned long tick_t;
1241#define PRItick "lu"
1242
1243static __inline__ tick_t
1244tick(void)
1245{
1246 unsigned long val;
1247 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1248 return val;
1249}
1250
1251
1252#elif defined(_WIN32) && defined(_MSC_VER)
1253#include <intrin.h>
1254typedef unsigned __int64 tick_t;
1255#define PRItick "llu"
1256
1257static inline tick_t
1258tick(void)
1259{
1260 return __rdtsc();
1261}
1262
1263#else /* use clock */
1264typedef clock_t tick_t;
1265#define PRItick "llu"
1266
1267static inline tick_t
1268tick(void)
1269{
1270 return clock();
1271}
1272#endif /* TSC */
1273#else /* USE_TICK_T */
1274#define MEASURE_LINE(expr) expr
1275#endif /* USE_TICK_T */
1276
1277static inline VALUE check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj);
1278
1279#define RVALUE_MARKED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1280#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1281#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1282#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1283#define RVALUE_PINNED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1284
1285static inline int
1286RVALUE_MARKED(rb_objspace_t *objspace, VALUE obj)
1287{
1288 check_rvalue_consistency(objspace, obj);
1289 return RVALUE_MARKED_BITMAP(obj) != 0;
1290}
1291
1292static inline int
1293RVALUE_PINNED(rb_objspace_t *objspace, VALUE obj)
1294{
1295 check_rvalue_consistency(objspace, obj);
1296 return RVALUE_PINNED_BITMAP(obj) != 0;
1297}
1298
1299static inline int
1300RVALUE_WB_UNPROTECTED(rb_objspace_t *objspace, VALUE obj)
1301{
1302 check_rvalue_consistency(objspace, obj);
1303 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1304}
1305
1306static inline int
1307RVALUE_MARKING(rb_objspace_t *objspace, VALUE obj)
1308{
1309 check_rvalue_consistency(objspace, obj);
1310 return RVALUE_MARKING_BITMAP(obj) != 0;
1311}
1312
1313static inline int
1314RVALUE_REMEMBERED(rb_objspace_t *objspace, VALUE obj)
1315{
1316 check_rvalue_consistency(objspace, obj);
1317 return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1318}
1319
1320static inline int
1321RVALUE_UNCOLLECTIBLE(rb_objspace_t *objspace, VALUE obj)
1322{
1323 check_rvalue_consistency(objspace, obj);
1324 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1325}
1326
1327#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1328#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1329#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1330
1331static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1332static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1333static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1334
1335static int
1336check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int terminate)
1337{
1338 int err = 0;
1339
1340 int lev = RB_GC_VM_LOCK_NO_BARRIER();
1341 {
1342 if (SPECIAL_CONST_P(obj)) {
1343 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1344 err++;
1345 }
1346 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1347 struct heap_page *empty_page = objspace->empty_pages;
1348 while (empty_page) {
1349 if ((uintptr_t)empty_page->body <= (uintptr_t)obj &&
1350 (uintptr_t)obj < (uintptr_t)empty_page->body + HEAP_PAGE_SIZE) {
1351 GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, empty_page));
1352 fprintf(stderr, "check_rvalue_consistency: %p is in an empty page (%p).\n",
1353 (void *)obj, (void *)empty_page);
1354 err++;
1355 goto skip;
1356 }
 empty_page = empty_page->free_next; /* advance the walk, or this loop never terminates */
1357 }
1358 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1359 err++;
1360 skip:
1361 ;
1362 }
1363 else {
1364 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1365 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1366 const int mark_bit = RVALUE_MARKED_BITMAP(obj) != 0;
1367 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
1368 const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1369 const int age = RVALUE_AGE_GET((VALUE)obj);
1370
1371 if (heap_page_in_global_empty_pages_pool(objspace, GET_HEAP_PAGE(obj))) {
1372 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", rb_obj_info(obj));
1373 err++;
1374 }
1375 if (BUILTIN_TYPE(obj) == T_NONE) {
1376 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", rb_obj_info(obj));
1377 err++;
1378 }
1379 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1380 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", rb_obj_info(obj));
1381 err++;
1382 }
1383
1384 if (BUILTIN_TYPE(obj) != T_DATA) {
1385 rb_obj_memsize_of((VALUE)obj);
1386 }
1387
1388 /* check generation
1389 *
1390 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1391 */
1392 if (age > 0 && wb_unprotected_bit) {
1393 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", rb_obj_info(obj), age);
1394 err++;
1395 }
1396
1397 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1398 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", rb_obj_info(obj));
1399 err++;
1400 }
1401
1402 if (!is_full_marking(objspace)) {
1403 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1404 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1405 rb_obj_info(obj), age);
1406 err++;
1407 }
1408 if (remembered_bit && age != RVALUE_OLD_AGE) {
1409 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1410 rb_obj_info(obj), age);
1411 err++;
1412 }
1413 }
1414
1415 /*
1416 * check coloring
1417 *
1418 * marking:false marking:true
1419 * marked:false white *invalid*
1420 * marked:true black grey
1421 */
1422 if (is_incremental_marking(objspace) && marking_bit) {
1423 if (!is_marking(objspace) && !mark_bit) {
1424 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", rb_obj_info(obj));
1425 err++;
1426 }
1427 }
1428 }
1429 }
1430 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
1431
1432 if (err > 0 && terminate) {
1433 rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
1434 }
1435 return err;
1436}
1437
1438#if RGENGC_CHECK_MODE == 0
1439static inline VALUE
1440check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj)
1441{
1442 return obj;
1443}
1444#else
1445static VALUE
1446check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj)
1447{
1448 check_rvalue_consistency_force(objspace, obj, TRUE);
1449 return obj;
1450}
1451#endif
1452
1453static inline bool
1454gc_object_moved_p(rb_objspace_t *objspace, VALUE obj)
1455{
1456
1457 bool ret;
1458 asan_unpoisoning_object(obj) {
1459 ret = BUILTIN_TYPE(obj) == T_MOVED;
1460 }
1461 return ret;
1462}
1463
1464static inline int
1465RVALUE_OLD_P(rb_objspace_t *objspace, VALUE obj)
1466{
1467 GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
1468 check_rvalue_consistency(objspace, obj);
1469 // Because this will only ever be called on GC controlled objects,
1470 // we can use the faster _RAW function here
1471 return RB_OBJ_PROMOTED_RAW(obj);
1472}
1473
1474static inline void
1475RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1476{
1477 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1478 objspace->rgengc.old_objects++;
1479
1480#if RGENGC_PROFILE >= 2
1481 objspace->profile.total_promoted_count++;
1482 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1483#endif
1484}
1485
1486static inline void
1487RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1488{
1489 RB_DEBUG_COUNTER_INC(obj_promote);
1490 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1491}
1492
1493/* set age to age+1 */
1494static inline void
1495RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1496{
1497 int age = RVALUE_AGE_GET((VALUE)obj);
1498
1499 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1500 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", rb_obj_info(obj));
1501 }
1502
1503 age++;
1504 RVALUE_AGE_SET(obj, age);
1505
1506 if (age == RVALUE_OLD_AGE) {
1507 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1508 }
1509
1510 check_rvalue_consistency(objspace, obj);
1511}
1512
1513static inline void
1514RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1515{
1516 check_rvalue_consistency(objspace, obj);
1517 GC_ASSERT(!RVALUE_OLD_P(objspace, obj));
1518 RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
1519 check_rvalue_consistency(objspace, obj);
1520}
1521
1522static inline void
1523RVALUE_AGE_RESET(VALUE obj)
1524{
1525 RVALUE_AGE_SET(obj, 0);
1526}
1527
1528static inline void
1529RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1530{
1531 check_rvalue_consistency(objspace, obj);
1532 GC_ASSERT(RVALUE_OLD_P(objspace, obj));
1533
1534 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(objspace, obj)) {
1535 CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
1536 }
1537
1538 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1539 RVALUE_AGE_RESET(obj);
1540
1541 if (RVALUE_MARKED(objspace, obj)) {
1542 objspace->rgengc.old_objects--;
1543 }
1544
1545 check_rvalue_consistency(objspace, obj);
1546}
1547
1548static inline int
1549RVALUE_BLACK_P(rb_objspace_t *objspace, VALUE obj)
1550{
1551 return RVALUE_MARKED(objspace, obj) && !RVALUE_MARKING(objspace, obj);
1552}
1553
1554static inline int
1555RVALUE_WHITE_P(rb_objspace_t *objspace, VALUE obj)
1556{
1557 return !RVALUE_MARKED(objspace, obj);
1558}
1559
1560bool
1561rb_gc_impl_gc_enabled_p(void *objspace_ptr)
1562{
1563 rb_objspace_t *objspace = objspace_ptr;
1564 return !dont_gc_val();
1565}
1566
1567void
1568rb_gc_impl_gc_enable(void *objspace_ptr)
1569{
1570 rb_objspace_t *objspace = objspace_ptr;
1571
1572 dont_gc_off();
1573}
1574
1575void
1576rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
1577{
1578 rb_objspace_t *objspace = objspace_ptr;
1579
1580 if (finish_current_gc) {
1581 gc_rest(objspace);
1582 }
1583
1584 dont_gc_on();
1585}
1586
1587/*
1588 --------------------------- ObjectSpace -----------------------------
1589*/
1590
1591static inline void *
1592calloc1(size_t n)
1593{
1594 return calloc(1, n);
1595}
1596
1597void
1598rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event)
1599{
1600 rb_objspace_t *objspace = objspace_ptr;
1601 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
1602}
1603
1604unsigned long long
1605rb_gc_impl_get_total_time(void *objspace_ptr)
1606{
1607 rb_objspace_t *objspace = objspace_ptr;
1608
1609 unsigned long long marking_time = objspace->profile.marking_time_ns;
1610 unsigned long long sweeping_time = objspace->profile.sweeping_time_ns;
1611
1612 return marking_time + sweeping_time;
1613}
1614
1615void
1616rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1617{
1618 rb_objspace_t *objspace = objspace_ptr;
1619
1620 objspace->flags.measure_gc = RTEST(flag) ? TRUE : FALSE;
1621}
1622
1623bool
1624rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1625{
1626 rb_objspace_t *objspace = objspace_ptr;
1627
1628 return objspace->flags.measure_gc;
1629}
1630
1631/* garbage objects will be collected soon. */
1632bool
1633rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
1634{
1635 rb_objspace_t *objspace = objspace_ptr;
1636
1637 bool dead = false;
1638
1639 asan_unpoisoning_object(ptr) {
1640 switch (BUILTIN_TYPE(ptr)) {
1641 case T_NONE:
1642 case T_MOVED:
1643 case T_ZOMBIE:
1644 dead = true;
1645 break;
1646 default:
1647 break;
1648 }
1649 }
1650
1651 if (dead) return true;
1652 return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&
1653 !RVALUE_MARKED(objspace, ptr);
1654}
1655
1656struct rb_gc_vm_context *
1657rb_gc_impl_get_vm_context(void *objspace_ptr)
1658{
1659 rb_objspace_t *objspace = objspace_ptr;
1660
1661 return &objspace->vm_context;
1662}
1663
1664static void free_stack_chunks(mark_stack_t *);
1665static void mark_stack_free_cache(mark_stack_t *);
1666static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1667
1668static inline void
1669heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1670{
1671 rb_asan_unpoison_object(obj, false);
1672
1673 asan_unlock_freelist(page);
1674
1675 struct free_slot *slot = (struct free_slot *)obj;
1676 slot->flags = 0;
1677 slot->next = page->freelist;
1678 page->freelist = slot;
1679 asan_lock_freelist(page);
1680
1681 // Should have already been reset
1682 GC_ASSERT(RVALUE_AGE_GET(obj) == 0);
1683
1684 if (RGENGC_CHECK_MODE &&
1685 /* obj should belong to page */
1686 !(page->start <= (uintptr_t)obj &&
1687 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1688 obj % sizeof(VALUE) == 0)) {
1689 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)obj);
1690 }
1691
1692 rb_asan_poison_object(obj);
1693 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1694}
1695
1696static void
1697heap_allocatable_bytes_expand(rb_objspace_t *objspace,
1698 rb_heap_t *heap, size_t free_slots, size_t total_slots, size_t slot_size)
1699{
1700 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1701 size_t target_total_slots;
1702
1703 if (goal_ratio == 0.0) {
1704 target_total_slots = (size_t)(total_slots * gc_params.growth_factor);
1705 }
1706 else if (total_slots == 0) {
1707 target_total_slots = gc_params.heap_init_bytes / slot_size;
1708 }
1709 else {
1710 /* Find `f' such that the used slots (total_slots - free_slots) make up a
1711 * (1 - goal_ratio) fraction of the enlarged heap, f * total_slots:
1712 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots) */
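 /* e.g. total_slots = 1000, free_slots = 100, goal_ratio = 0.40:
 * f = 900 / (0.6 * 1000) = 1.5, so target_total_slots = 1500, and the 900
 * used slots leave 600 free slots = 40% of the new total. */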
1713 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1714
1715 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1716 if (f < 1.0) f = 1.1;
1717
1718 target_total_slots = (size_t)(f * total_slots);
1719
1720 if (0) {
1721 fprintf(stderr,
1722 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1723 " G(%1.2f), f(%1.2f),"
1724 " total_slots(%8"PRIuSIZE") => target_total_slots(%8"PRIuSIZE")\n",
1725 free_slots, total_slots, free_slots/(double)total_slots,
1726 goal_ratio, f, total_slots, target_total_slots);
1727 }
1728 }
1729
1730 if (gc_params.growth_max_bytes > 0) {
1731 size_t max_total_slots = total_slots + gc_params.growth_max_bytes / slot_size;
1732 if (target_total_slots > max_total_slots) target_total_slots = max_total_slots;
1733 }
1734
1735 size_t extend_slot_count = target_total_slots - total_slots;
1736 /* Extend by at least 1 slot. */
1737 if (extend_slot_count == 0) extend_slot_count = 1;
1738
1739 objspace->heap_pages.allocatable_bytes += extend_slot_count * slot_size;
1740}
1741
1742static inline void
1743heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1744{
1745 asan_unlock_freelist(page);
1746 GC_ASSERT(page->free_slots != 0);
1747 GC_ASSERT(page->freelist != NULL);
1748
1749 page->free_next = heap->free_pages;
1750 heap->free_pages = page;
1751
1752 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1753
1754 asan_lock_freelist(page);
1755}
1756
1757static inline void
1758heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1759{
1760 asan_unlock_freelist(page);
1761 GC_ASSERT(page->free_slots != 0);
1762 GC_ASSERT(page->freelist != NULL);
1763
1764 page->free_next = heap->pooled_pages;
1765 heap->pooled_pages = page;
1766 objspace->rincgc.pooled_slots += page->free_slots;
1767
1768 asan_lock_freelist(page);
1769}
1770
1771static void
1772heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1773{
1774 ccan_list_del(&page->page_node);
1775 heap->total_pages--;
1776 heap->total_slots -= page->total_slots;
1777}
1778
1779static void
1780gc_aligned_free(void *ptr, size_t size)
1781{
1782#if defined __MINGW32__
1783 __mingw_aligned_free(ptr);
1784#elif defined _WIN32
1785 _aligned_free(ptr);
1786#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
1787 free(ptr);
1788#else
1789 free(((void**)ptr)[-1]);
1790#endif
1791}
1792
1793static void
1794heap_page_body_free(struct heap_page_body *page_body)
1795{
1796 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1797
1798 if (HEAP_PAGE_ALLOC_USE_MMAP) {
1799#ifdef HAVE_MMAP
1800 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
1801 if (munmap(page_body, HEAP_PAGE_SIZE)) {
1802 rb_bug("heap_page_body_free: munmap failed");
1803 }
1804#endif
1805 }
1806 else {
1807 gc_aligned_free(page_body, HEAP_PAGE_SIZE);
1808 }
1809}
1810
1811static void
1812heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1813{
1814 objspace->heap_pages.freed_pages++;
1815 heap_page_body_free(page->body);
1816 free(page);
1817}
1818
1819static void
1820heap_pages_free_unused_pages(rb_objspace_t *objspace)
1821{
1822 if (objspace->empty_pages != NULL && heap_pages_freeable_pages > 0) {
1823 GC_ASSERT(objspace->empty_pages_count > 0);
1824 objspace->empty_pages = NULL;
1825 objspace->empty_pages_count = 0;
1826
1827 size_t i, j;
1828 for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
1829 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
1830
1831 if (heap_page_in_global_empty_pages_pool(objspace, page) && heap_pages_freeable_pages > 0) {
1832 heap_page_free(objspace, page);
1833 heap_pages_freeable_pages--;
1834 }
1835 else {
1836 if (heap_page_in_global_empty_pages_pool(objspace, page)) {
1837 page->free_next = objspace->empty_pages;
1838 objspace->empty_pages = page;
1839 objspace->empty_pages_count++;
1840 }
1841
1842 if (i != j) {
1843 rb_darray_set(objspace->heap_pages.sorted, j, page);
1844 }
1845 j++;
1846 }
1847 }
1848
1849 rb_darray_pop(objspace->heap_pages.sorted, i - j);
1850 GC_ASSERT(rb_darray_size(objspace->heap_pages.sorted) == j);
1851
1852 struct heap_page *hipage = rb_darray_get(objspace->heap_pages.sorted, rb_darray_size(objspace->heap_pages.sorted) - 1);
1853 uintptr_t himem = (uintptr_t)hipage->body + HEAP_PAGE_SIZE;
1854 GC_ASSERT(himem <= heap_pages_himem);
1855 heap_pages_himem = himem;
1856
1857 struct heap_page *lopage = rb_darray_get(objspace->heap_pages.sorted, 0);
1858 uintptr_t lomem = (uintptr_t)lopage->body + sizeof(struct heap_page_header);
1859 GC_ASSERT(lomem >= heap_pages_lomem);
1860 heap_pages_lomem = lomem;
1861 }
1862}
1863
1864static void *
1865gc_aligned_malloc(size_t alignment, size_t size)
1866{
1867 /* alignment must be a power of 2 */
1868 GC_ASSERT(((alignment - 1) & alignment) == 0);
1869 GC_ASSERT(alignment % sizeof(void*) == 0);
1870
1871 void *res;
1872
1873#if defined __MINGW32__
1874 res = __mingw_aligned_malloc(size, alignment);
1875#elif defined _WIN32
1876 void *_aligned_malloc(size_t, size_t);
1877 res = _aligned_malloc(size, alignment);
1878#elif defined(HAVE_POSIX_MEMALIGN)
1879 if (posix_memalign(&res, alignment, size) != 0) {
1880 return NULL;
1881 }
1882#elif defined(HAVE_MEMALIGN)
1883 res = memalign(alignment, size);
1884#else
1885 char* aligned;
1886 res = malloc(alignment + size + sizeof(void*));
1887 aligned = (char*)res + alignment + sizeof(void*);
1888 aligned -= ((VALUE)aligned & (alignment - 1));
1889 ((void**)aligned)[-1] = res;
1890 res = (void*)aligned;
1891#endif
1892
1893 GC_ASSERT((uintptr_t)res % alignment == 0);
1894
1895 return res;
1896}
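
/* A worked example of the fallback branch above, with hypothetical numbers
 * alignment == 0x4000 (16 KiB) and sizeof(void*) == 8: if malloc returns
 * res == 0x7f0000003010, then
 *
 *     aligned  = res + 0x4000 + 8          == 0x7f0000007018
 *     aligned -= aligned & (0x4000 - 1)    ->  0x7f0000004000
 *
 * so the returned pointer is 16 KiB-aligned, and at least sizeof(void*)
 * bytes of the over-allocation lie below it, leaving room to stash the
 * original res at ((void**)aligned)[-1] for gc_aligned_free to retrieve. */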
1897
1898static struct heap_page_body *
1899heap_page_body_allocate(void)
1900{
1901 struct heap_page_body *page_body;
1902
1903 if (HEAP_PAGE_ALLOC_USE_MMAP) {
1904#ifdef HAVE_MMAP
1905 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
1906
1907 size_t mmap_size = HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE;
1908 char *ptr = mmap(NULL, mmap_size,
1909 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1910 if (ptr == MAP_FAILED) {
1911 return NULL;
1912 }
1913
1914 // If we are building `default.c` as part of the ruby executable, we
1915 // may just call `ruby_annotate_mmap`. But if we are building
1916 // `default.c` as a shared library, we will not have access to private
1917 // symbols, and we have to either call prctl directly or make our own
1918 // wrapper.
1919#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
1920 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, mmap_size, "Ruby:GC:default:heap_page_body_allocate");
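        /* The prctl call above is best-effort: on kernels without
         * PR_SET_VMA_ANON_NAME support it fails and sets errno, which is
         * harmless here, so errno is cleared rather than reported. */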
1921 errno = 0;
1922#endif
1923
1924 char *aligned = ptr + HEAP_PAGE_ALIGN;
1925 aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
1926 GC_ASSERT(aligned > ptr);
1927 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
1928
1929 size_t start_out_of_range_size = aligned - ptr;
1930 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1931 if (start_out_of_range_size > 0) {
1932 if (munmap(ptr, start_out_of_range_size)) {
1933 rb_bug("heap_page_body_allocate: munmap failed for start");
1934 }
1935 }
1936
1937 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
1938 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1939 if (end_out_of_range_size > 0) {
1940 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
1941 rb_bug("heap_page_body_allocate: munmap failed for end");
1942 }
1943 }
1944
1945 page_body = (struct heap_page_body *)aligned;
1946#endif
1947 }
1948 else {
1949 page_body = gc_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1950 }
1951
1952 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1953
1954 return page_body;
1955}
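
/* A sketch of the mmap trimming above, assuming (hypothetically) that
 * HEAP_PAGE_ALIGN == HEAP_PAGE_SIZE == 0x10000 and mmap returns
 * ptr == 0x7f0000005000:
 *
 *     aligned  = ptr + 0x10000             == 0x7f0000015000
 *     aligned -= aligned & 0xffff          ->  0x7f0000010000
 *
 * munmap(ptr, 0xb000) then releases the unaligned head and
 * munmap(aligned + 0x10000, 0x5000) the tail, leaving exactly one aligned,
 * page-sized mapping. Both trims are OS-page multiples because mmap returns
 * page-aligned memory and HEAP_PAGE_ALIGN is a page multiple (asserted
 * above). */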
1956
1957static struct heap_page *
1958heap_page_resurrect(rb_objspace_t *objspace)
1959{
1960 struct heap_page *page = NULL;
1961 if (objspace->empty_pages == NULL) {
1962 GC_ASSERT(objspace->empty_pages_count == 0);
1963 }
1964 else {
1965 GC_ASSERT(objspace->empty_pages_count > 0);
1966 objspace->empty_pages_count--;
1967 page = objspace->empty_pages;
1968 objspace->empty_pages = page->free_next;
1969 }
1970
1971 return page;
1972}
1973
1974static struct heap_page *
1975heap_page_allocate(rb_objspace_t *objspace)
1976{
1977 struct heap_page_body *page_body = heap_page_body_allocate();
1978 if (page_body == 0) {
1979 rb_memerror();
1980 }
1981
1982 struct heap_page *page = calloc1(sizeof(struct heap_page));
1983 if (page == 0) {
1984 heap_page_body_free(page_body);
1985 rb_memerror();
1986 }
1987
1988 uintptr_t start = (uintptr_t)page_body + sizeof(struct heap_page_header);
1989 uintptr_t end = (uintptr_t)page_body + HEAP_PAGE_SIZE;
1990
1991 size_t lo = 0;
1992 size_t hi = rb_darray_size(objspace->heap_pages.sorted);
1993 while (lo < hi) {
1994 struct heap_page *mid_page;
1995
1996 size_t mid = (lo + hi) / 2;
1997 mid_page = rb_darray_get(objspace->heap_pages.sorted, mid);
1998 if ((uintptr_t)mid_page->start < start) {
1999 lo = mid + 1;
2000 }
2001 else if ((uintptr_t)mid_page->start > start) {
2002 hi = mid;
2003 }
2004 else {
2005 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2006 }
2007 }
2008
2009 rb_darray_insert_without_gc(&objspace->heap_pages.sorted, hi, page);
2010
2011 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2012 if (heap_pages_himem < end) heap_pages_himem = end;
2013
2014 page->body = page_body;
2015 page_body->header.page = page;
2016
2017 objspace->heap_pages.allocated_pages++;
2018
2019 return page;
2020}
2021
2022static void
2023heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2024{
2025 /* Adding to eden heap during incremental sweeping is forbidden */
2026 GC_ASSERT(!heap->sweeping_page);
2027 GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, page));
2028
2029 /* Align start to slot_size boundary */
2030 uintptr_t start = (uintptr_t)page->body + sizeof(struct heap_page_header);
2031 uintptr_t rem = start % heap->slot_size;
2032 if (rem) start += heap->slot_size - rem;
2033
2034 int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/heap->slot_size);
2035
2036 page->start = start;
2037 page->total_slots = slot_count;
2038 page->slot_size = heap->slot_size;
2039 page->slot_size_reciprocal = heap_slot_reciprocal_table[heap - heaps];
2040 page->heap = heap;
2041
2042 memset(&page->wb_unprotected_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
2043 memset(&page->age_bits[0], 0, sizeof(page->age_bits));
2044
2045 asan_unlock_freelist(page);
2046 page->freelist = NULL;
2047 asan_unpoison_memory_region(page->body, HEAP_PAGE_SIZE, false);
2048 for (VALUE p = (VALUE)start; p < start + (slot_count * heap->slot_size); p += heap->slot_size) {
2049 heap_page_add_freeobj(objspace, page, p);
2050 }
2051 asan_lock_freelist(page);
2052
2053 page->free_slots = slot_count;
2054
2055 heap->total_allocated_pages++;
2056
2057 ccan_list_add_tail(&heap->pages, &page->page_node);
2058 heap->total_pages++;
2059 heap->total_slots += page->total_slots;
2060}
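
/* Slot layout example for the alignment arithmetic above, with hypothetical
 * numbers HEAP_PAGE_SIZE == 0x10000, sizeof(struct heap_page_header) == 8
 * and slot_size == 64: start = body + 8, rem = 8, so start is bumped by 56
 * to body + 64, and slot_count = (0x10000 - 64) / 64 = 1023. Every slot then
 * begins at body + 64 * k, which is the invariant that per-page
 * "(p - page->start) % page->slot_size" checks rely on. */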
2061
2062static int
2063heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_heap_t *heap)
2064{
2065 gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
2066 "allocatable_bytes: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2067 rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_bytes, heap->total_pages);
2068
2069 bool allocated = false;
2070 struct heap_page *page = heap_page_resurrect(objspace);
2071
2072 if (page == NULL && objspace->heap_pages.allocatable_bytes > 0) {
2073 page = heap_page_allocate(objspace);
2074 allocated = true;
2075
2076 GC_ASSERT(page != NULL);
2077 }
2078
2079 if (page != NULL) {
2080 heap_add_page(objspace, heap, page);
2081 heap_add_freepage(heap, page);
2082
2083 if (allocated) {
2084 size_t page_bytes = (size_t)page->total_slots * page->slot_size;
2085 if (objspace->heap_pages.allocatable_bytes > page_bytes) {
2086 objspace->heap_pages.allocatable_bytes -= page_bytes;
2087 }
2088 else {
2089 objspace->heap_pages.allocatable_bytes = 0;
2090 }
2091 }
2092 }
2093
2094 return page != NULL;
2095}
2096
2097static void
2098heap_page_allocate_and_initialize_force(rb_objspace_t *objspace, rb_heap_t *heap)
2099{
2100 size_t prev_allocatable_bytes = objspace->heap_pages.allocatable_bytes;
2101 objspace->heap_pages.allocatable_bytes = HEAP_PAGE_SIZE;
2102 heap_page_allocate_and_initialize(objspace, heap);
2103 GC_ASSERT(heap->free_pages != NULL);
2104 objspace->heap_pages.allocatable_bytes = prev_allocatable_bytes;
2105}
2106
2107static void
2108gc_continue(rb_objspace_t *objspace, rb_heap_t *heap)
2109{
2110 unsigned int lock_lev;
2111 bool needs_gc = is_incremental_marking(objspace) || needs_continue_sweeping(objspace, heap);
2112 if (!needs_gc) return;
2113
2114     gc_enter(objspace, gc_enter_event_continue, &lock_lev); // takes the VM barrier; the early return above avoids it when there is no GC work
2115
2116 /* Continue marking if in incremental marking. */
2117 if (is_incremental_marking(objspace)) {
2118 if (gc_marks_continue(objspace, heap)) {
2119 gc_sweep(objspace);
2120 }
2121 }
2122
2123 if (needs_continue_sweeping(objspace, heap)) {
2124 gc_sweep_continue(objspace, heap);
2125 }
2126
2127 gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2128}
2129
2130static void
2131heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
2132{
2133 GC_ASSERT(heap->free_pages == NULL);
2134
2135 if (heap->total_slots < gc_params.heap_init_bytes / heap->slot_size &&
2136 heap->sweeping_page == NULL) {
2137 heap_page_allocate_and_initialize_force(objspace, heap);
2138 GC_ASSERT(heap->free_pages != NULL);
2139 return;
2140 }
2141
2142 /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2143 gc_continue(objspace, heap);
2144
2145 if (heap->free_pages == NULL) {
2146 heap_page_allocate_and_initialize(objspace, heap);
2147 }
2148
2149     /* If we still don't have a free page and are not allowed to create a new
2150      * page, we should start a new GC cycle. */
2151 if (heap->free_pages == NULL) {
2152 GC_ASSERT(objspace->empty_pages_count == 0);
2153 GC_ASSERT(objspace->heap_pages.allocatable_bytes == 0);
2154
2155 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2156 rb_memerror();
2157 }
2158 else {
2159 if (objspace->heap_pages.allocatable_bytes == 0 && !gc_config_full_mark_val) {
2160 heap_allocatable_bytes_expand(objspace, heap,
2161 heap->freed_slots + heap->empty_slots,
2162 heap->total_slots, heap->slot_size);
2163 GC_ASSERT(objspace->heap_pages.allocatable_bytes > 0);
2164 }
2165 /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2166 gc_continue(objspace, heap);
2167
2168         /* If we're not doing incremental marking (e.g. this was a minor GC) or
2169          * we have finished sweeping and still don't have a free page, then
2170          * gc_sweep_finish_heap should allow us to create a new page. */
2171 if (heap->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, heap)) {
2172 if (gc_needs_major_flags == GPR_FLAG_NONE) {
2173 rb_bug("cannot create a new page after GC");
2174 }
2175 else { // Major GC is required, which will allow us to create new page
2176 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2177 rb_memerror();
2178 }
2179 else {
2180 /* Do steps of incremental marking or lazy sweeping. */
2181 gc_continue(objspace, heap);
2182
2183 if (heap->free_pages == NULL &&
2184 !heap_page_allocate_and_initialize(objspace, heap)) {
2185 rb_bug("cannot create a new page after major GC");
2186 }
2187 }
2188 }
2189 }
2190 }
2191 }
2192
2193 GC_ASSERT(heap->free_pages != NULL);
2194}
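
/* In short, heap_prepare escalates until a free page exists: (1) force a new
 * page while the heap is below its configured initial size, (2) continue any
 * in-progress incremental marking or lazy sweeping, (3) allocate a page if
 * allocatable_bytes permits, (4) start a GC, possibly expanding
 * allocatable_bytes afterwards, and (5) as a last resort start a major GC;
 * only when all of that fails does it rb_bug or rb_memerror. */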
2195
2196#if GC_DEBUG
2197static inline const char*
2198rb_gc_impl_source_location_cstr(int *ptr)
2199{
2200     /* We used to call `rb_source_location_cstr()` directly, but can no
2201      * longer; we have to do the heavy lifting through our debugging API. */
2202 if (! ptr) {
2203 return NULL;
2204 }
2205 else if (! (*ptr = rb_sourceline())) {
2206 return NULL;
2207 }
2208 else {
2209 return rb_sourcefile();
2210 }
2211}
2212#endif
2213
2214static inline VALUE
2215newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2216{
2217 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2218 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2219 RBASIC(obj)->flags = flags;
2220 *((VALUE *)&RBASIC(obj)->klass) = klass;
2221#if RBASIC_SHAPE_ID_FIELD
2222 RBASIC(obj)->shape_id = 0;
2223#endif
2224
2226#if RACTOR_CHECK_MODE
2227 void rb_ractor_setup_belonging(VALUE obj);
2228 rb_ractor_setup_belonging(obj);
2229#endif
2230
2231#if RGENGC_CHECK_MODE
2232 int lev = RB_GC_VM_LOCK_NO_BARRIER();
2233 {
2234 check_rvalue_consistency(objspace, obj);
2235
2236 GC_ASSERT(RVALUE_MARKED(objspace, obj) == FALSE);
2237 GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
2238 GC_ASSERT(RVALUE_OLD_P(objspace, obj) == FALSE);
2239 GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, obj) == FALSE);
2240
2241 if (RVALUE_REMEMBERED(objspace, obj)) rb_bug("newobj: %s is remembered.", rb_obj_info(obj));
2242 }
2243 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
2244#endif
2245
2246 if (RB_UNLIKELY(wb_protected == FALSE)) {
2247 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2248 }
2249
2250#if RGENGC_PROFILE
2251 if (wb_protected) {
2252 objspace->profile.total_generated_normal_object_count++;
2253#if RGENGC_PROFILE >= 2
2254 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2255#endif
2256 }
2257 else {
2258 objspace->profile.total_generated_shady_object_count++;
2259#if RGENGC_PROFILE >= 2
2260 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2261#endif
2262 }
2263#endif
2264
2265#if GC_DEBUG
2266 GET_RVALUE_OVERHEAD(obj)->file = rb_gc_impl_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
2267 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2268#endif
2269
2270 gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
2271
2272 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
2273 return obj;
2274}
2275
2276size_t
2277rb_gc_impl_obj_slot_size(VALUE obj)
2278{
2279 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2280}
2281
2282static inline size_t
2283heap_slot_size(unsigned char pool_id)
2284{
2285 GC_ASSERT(pool_id < HEAP_COUNT);
2286
2287 return pool_slot_sizes[pool_id] - RVALUE_OVERHEAD;
2288}
2289
2290bool
2291rb_gc_impl_size_allocatable_p(size_t size)
2292{
2293 return size + RVALUE_OVERHEAD <= pool_slot_sizes[HEAP_COUNT - 1];
2294}
2295
2296static const size_t ALLOCATED_COUNT_STEP = 1024;
2297static void
2298ractor_cache_flush_count(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache)
2299{
2300 for (int heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
2301 rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2302
2303 rb_heap_t *heap = &heaps[heap_idx];
2304 RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
2305 heap_cache->allocated_objects_count = 0;
2306 }
2307}
2308
2309static inline VALUE
2310ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2311 size_t heap_idx)
2312{
2313 rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2314 struct free_slot *p = heap_cache->freelist;
2315
2316 if (RB_UNLIKELY(is_incremental_marking(objspace))) {
2317 // Not allowed to allocate without running an incremental marking step
2318 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2319 return Qfalse;
2320 }
2321
2322 if (p) {
2323 cache->incremental_mark_step_allocated_slots++;
2324 }
2325 }
2326
2327 if (RB_LIKELY(p)) {
2328 VALUE obj = (VALUE)p;
2329 rb_asan_unpoison_object(obj, true);
2330 heap_cache->freelist = p->next;
2331
2332 heap_cache->allocated_objects_count++;
2333 rb_heap_t *heap = &heaps[heap_idx];
2334 if (heap_cache->allocated_objects_count >= ALLOCATED_COUNT_STEP) {
2335 RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
2336 heap_cache->allocated_objects_count = 0;
2337 }
2338
2339#if RGENGC_CHECK_MODE
2340 GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == heap_slot_size(heap_idx));
2341 // zero clear
2342 MEMZERO((char *)obj, char, heap_slot_size(heap_idx));
2343#endif
2344 return obj;
2345 }
2346 else {
2347 return Qfalse;
2348 }
2349}
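
/* During incremental marking a ractor cache may serve at most
 * INCREMENTAL_MARK_STEP_ALLOCATIONS objects before this function returns
 * Qfalse even with a non-empty freelist; that deliberate "miss" routes the
 * caller through newobj_cache_miss, which performs a marking step and resets
 * the budget. */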
2350
2351static struct heap_page *
2352heap_next_free_page(rb_objspace_t *objspace, rb_heap_t *heap)
2353{
2354 struct heap_page *page;
2355
2356 if (heap->free_pages == NULL) {
2357 heap_prepare(objspace, heap);
2358 }
2359
2360 page = heap->free_pages;
2361 heap->free_pages = page->free_next;
2362
2363 GC_ASSERT(page->free_slots != 0);
2364
2365 asan_unlock_freelist(page);
2366
2367 return page;
2368}
2369
2370static inline void
2371ractor_cache_set_page(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx,
2372 struct heap_page *page)
2373{
2374 gc_report(3, objspace, "ractor_set_cache: Using page %p\n", (void *)page->body);
2375
2376 rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2377
2378 GC_ASSERT(heap_cache->freelist == NULL);
2379 GC_ASSERT(page->free_slots != 0);
2380 GC_ASSERT(page->freelist != NULL);
2381
2382 heap_cache->using_page = page;
2383 heap_cache->freelist = page->freelist;
2384 page->free_slots = 0;
2385 page->freelist = NULL;
2386
2387 rb_asan_unpoison_object((VALUE)heap_cache->freelist, false);
2388 GC_ASSERT(RB_TYPE_P((VALUE)heap_cache->freelist, T_NONE));
2389 rb_asan_poison_object((VALUE)heap_cache->freelist);
2390}
2391
2392static void
2393init_size_to_heap_idx(void)
2394{
2395 for (size_t i = 0; i < sizeof(size_to_heap_idx); i++) {
2396 size_t effective = i * 8 + RVALUE_OVERHEAD;
2397 uint8_t idx;
2398 for (idx = 0; idx < HEAP_COUNT; idx++) {
2399 if (effective <= pool_slot_sizes[idx]) break;
2400 }
2401 size_to_heap_idx[i] = idx;
2402 }
2403}
2404
2405static inline size_t
2406heap_idx_for_size(size_t size)
2407{
2408 size_t compressed = (size + 7) >> 3;
2409 if (compressed < sizeof(size_to_heap_idx)) {
2410 size_t heap_idx = size_to_heap_idx[compressed];
2411 if (RB_LIKELY(heap_idx < HEAP_COUNT)) return heap_idx;
2412 }
2413
2414 rb_bug("heap_idx_for_size: allocation size too large "
2415 "(size=%"PRIuSIZE")", size);
2416}
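
/* Worked example of the lookup above, with hypothetical values
 * RVALUE_OVERHEAD == 0 and pool_slot_sizes == {40, 80, 160, 320, 640}:
 * size == 41 gives compressed = (41 + 7) >> 3 = 6, and init_size_to_heap_idx
 * filled size_to_heap_idx[6] with the first heap whose slots can hold
 * 6 * 8 + 0 = 48 bytes, i.e. index 1 (80-byte slots). Sizes whose rounded-up
 * effective size exceeds the largest slot map to HEAP_COUNT and fall through
 * to the rb_bug. */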
2417
2418size_t
2419rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
2420{
2421 return heap_idx_for_size(size);
2422}
2423
2425static size_t heap_sizes[HEAP_COUNT + 1] = { 0 };
2426
2427size_t *
2428rb_gc_impl_heap_sizes(void *objspace_ptr)
2429{
2430 if (heap_sizes[0] == 0) {
2431 for (unsigned char i = 0; i < HEAP_COUNT; i++) {
2432 heap_sizes[i] = heap_slot_size(i);
2433 }
2434 }
2435
2436 return heap_sizes;
2437}
2438
2439NOINLINE(static VALUE newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked));
2440
2441static VALUE
2442newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)
2443{
2444 rb_heap_t *heap = &heaps[heap_idx];
2445 VALUE obj = Qfalse;
2446
2447 unsigned int lev = 0;
2448 bool unlock_vm = false;
2449
2450 if (!vm_locked) {
2451 lev = RB_GC_CR_LOCK();
2452 unlock_vm = true;
2453 }
2454
2455 {
2456 if (is_incremental_marking(objspace)) {
2457 gc_continue(objspace, heap);
2458 cache->incremental_mark_step_allocated_slots = 0;
2459
2460 // Retry allocation after resetting incremental_mark_step_allocated_slots
2461 obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2462 }
2463
2464 if (obj == Qfalse) {
2465 // Get next free page (possibly running GC)
2466 struct heap_page *page = heap_next_free_page(objspace, heap);
2467 ractor_cache_set_page(objspace, cache, heap_idx, page);
2468
2469 // Retry allocation after moving to new page
2470 obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2471 }
2472 }
2473
2474 if (unlock_vm) {
2475 RB_GC_CR_UNLOCK(lev);
2476 }
2477
2478 if (RB_UNLIKELY(obj == Qfalse)) {
2479 rb_memerror();
2480 }
2481 return obj;
2482}
2483
2484static VALUE
2485newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)
2486{
2487 VALUE obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2488
2489 if (RB_UNLIKELY(obj == Qfalse)) {
2490 obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
2491 }
2492
2493 return obj;
2494}
2495
2496ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx));
2497
2498static inline VALUE
2499newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx)
2500{
2501 VALUE obj;
2502 unsigned int lev;
2503
2504 lev = RB_GC_CR_LOCK();
2505 {
2506 if (RB_UNLIKELY(during_gc || ruby_gc_stressful)) {
2507 if (during_gc) {
2508 dont_gc_on();
2509 during_gc = 0;
2510 if (rb_memerror_reentered()) {
2511 rb_memerror();
2512 }
2513 rb_bug("object allocation during garbage collection phase");
2514 }
2515
2516 if (ruby_gc_stressful) {
2517 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2518 rb_memerror();
2519 }
2520 }
2521 }
2522
2523 obj = newobj_alloc(objspace, cache, heap_idx, true);
2524 newobj_init(klass, flags, wb_protected, objspace, obj);
2525 }
2526 RB_GC_CR_UNLOCK(lev);
2527
2528 return obj;
2529}
2530
2531NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2532 rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
2533NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2534 rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
2535
2536static VALUE
2537newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
2538{
2539 return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx);
2540}
2541
2542static VALUE
2543newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
2544{
2545 return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx);
2546}
2547
2548VALUE
2549rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
2550{
2551 VALUE obj;
2552 rb_objspace_t *objspace = objspace_ptr;
2553
2554 RB_DEBUG_COUNTER_INC(obj_newobj);
2555 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2556
2557 if (RB_UNLIKELY(stress_to_class)) {
2558 if (rb_hash_lookup2(stress_to_class, klass, Qundef) != Qundef) {
2559 rb_memerror();
2560 }
2561 }
2562
2563     size_t heap_idx = heap_idx_for_size(alloc_size);
2564
2565     rb_ractor_newobj_cache_t *cache = (rb_ractor_newobj_cache_t *)cache_ptr;
2566
2567 if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) &&
2568 wb_protected) {
2569 obj = newobj_alloc(objspace, cache, heap_idx, false);
2570 newobj_init(klass, flags, wb_protected, objspace, obj);
2571 }
2572 else {
2573 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2574
2575 obj = wb_protected ?
2576 newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) :
2577 newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx);
2578 }
2579
2580 return obj;
2581}
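
/* Allocation paths in summary: the common case (no GC in progress, GC stress
 * off, write-barrier-protected object) goes through newobj_alloc without
 * taking the critical-section lock and pops a slot straight off the
 * per-ractor freelist cache; everything else funnels into newobj_slowpath,
 * which locks first. A cache miss in either path lands in newobj_cache_miss,
 * which may advance incremental GC, pull a fresh page via
 * heap_next_free_page, or ultimately raise rb_memerror. */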
2582
2583static int
2584ptr_in_page_body_p(const void *ptr, const void *memb)
2585{
2586 struct heap_page *page = *(struct heap_page **)memb;
2587 uintptr_t p_body = (uintptr_t)page->body;
2588
2589 if ((uintptr_t)ptr >= p_body) {
2590 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
2591 }
2592 else {
2593 return -1;
2594 }
2595}
2596
2597PUREFUNC(static inline struct heap_page *heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
2598static inline struct heap_page *
2599heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
2600{
2601 struct heap_page **res;
2602
2603 if (ptr < (uintptr_t)heap_pages_lomem ||
2604 ptr > (uintptr_t)heap_pages_himem) {
2605 return NULL;
2606 }
2607
2608 res = bsearch((void *)ptr, rb_darray_ref(objspace->heap_pages.sorted, 0),
2609 rb_darray_size(objspace->heap_pages.sorted), sizeof(struct heap_page *),
2610 ptr_in_page_body_p);
2611
2612 if (res) {
2613 return *res;
2614 }
2615 else {
2616 return NULL;
2617 }
2618}
2619
2620PUREFUNC(static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr);)
2621static inline bool
2622is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr)
2623{
2624 register uintptr_t p = (uintptr_t)ptr;
2625 register struct heap_page *page;
2626
2627 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2628
2629 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2630 RB_DEBUG_COUNTER_INC(gc_isptr_range);
2631
2632 if (p % sizeof(VALUE) != 0) return FALSE;
2633 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2634
2635 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
2636 if (page) {
2637 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2638 if (heap_page_in_global_empty_pages_pool(objspace, page)) {
2639 return FALSE;
2640 }
2641 else {
2642 if (p < page->start) return FALSE;
2643 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
2644 if ((p - page->start) % page->slot_size != 0) return FALSE;
2645
2646 return TRUE;
2647 }
2648 }
2649 return FALSE;
2650}
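
/* Filtering example with a hypothetical page whose start == 0x55500000a028,
 * slot_size == 40 and total_slots == 1636: a candidate p == 0x55500000a078
 * passes the lomem/himem range check, is sizeof(VALUE)-aligned, bsearch
 * locates the page, and (p - start) == 80 is an exact multiple of 40, so p
 * is accepted as a slot reference; p + 8 would fail the final modulo test
 * and be rejected as an interior pointer. */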
2651
2652bool
2653rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
2654{
2655 return is_pointer_to_heap(objspace_ptr, ptr);
2656}
2657
2658#define ZOMBIE_OBJ_KEPT_FLAGS (FL_FINALIZE)
2659
2660void
2661rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
2662{
2663 rb_objspace_t *objspace = objspace_ptr;
2664
2665 struct RZombie *zombie = RZOMBIE(obj);
2666 zombie->flags = T_ZOMBIE | (zombie->flags & ZOMBIE_OBJ_KEPT_FLAGS);
2667 zombie->dfree = dfree;
2668 zombie->data = data;
2669 VALUE prev, next = heap_pages_deferred_final;
2670 do {
2671 zombie->next = prev = next;
2672 next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
2673 } while (next != prev);
2674
2675 struct heap_page *page = GET_HEAP_PAGE(obj);
2676 page->final_slots++;
2677 page->heap->final_slots_count++;
2678}
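
/* The do/while above is a lock-free push onto heap_pages_deferred_final.
 * The same pattern, sketched with a hypothetical atomic_cas(ptr, old, new)
 * that returns the previous value:
 *
 *     do {
 *         node->next = prev = head;           // snapshot the current head
 *         next = atomic_cas(&head, prev, node);
 *     } while (next != prev);                 // someone pushed first: retry
 *
 * If another thread pushes between the snapshot and the CAS, the CAS returns
 * the newer head rather than our snapshot and the loop retries, so no link
 * is ever lost. */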
2679
2680typedef int each_obj_callback(void *, void *, size_t, void *);
2681typedef int each_page_callback(struct heap_page *, void *);
2682
2683 struct each_obj_data {
2684     rb_objspace_t *objspace;
2685     bool reenable_incremental;
2686
2687 each_obj_callback *each_obj_callback;
2688 each_page_callback *each_page_callback;
2689 void *data;
2690
2691 struct heap_page **pages[HEAP_COUNT];
2692 size_t pages_counts[HEAP_COUNT];
2693};
2694
2695static VALUE
2696objspace_each_objects_ensure(VALUE arg)
2697{
2698 struct each_obj_data *data = (struct each_obj_data *)arg;
2699 rb_objspace_t *objspace = data->objspace;
2700
2701 /* Reenable incremental GC */
2702 if (data->reenable_incremental) {
2703 objspace->flags.dont_incremental = FALSE;
2704 }
2705
2706 for (int i = 0; i < HEAP_COUNT; i++) {
2707 struct heap_page **pages = data->pages[i];
2708 free(pages);
2709 }
2710
2711 return Qnil;
2712}
2713
2714static VALUE
2715objspace_each_objects_try(VALUE arg)
2716{
2717 struct each_obj_data *data = (struct each_obj_data *)arg;
2718 rb_objspace_t *objspace = data->objspace;
2719
2720 /* Copy pages from all heaps to their respective buffers. */
2721 for (int i = 0; i < HEAP_COUNT; i++) {
2722 rb_heap_t *heap = &heaps[i];
2723 size_t size = heap->total_pages * sizeof(struct heap_page *);
2724
2725 struct heap_page **pages = malloc(size);
2726 if (!pages) rb_memerror();
2727
2728 /* Set up pages buffer by iterating over all pages in the current eden
2729 * heap. This will be a snapshot of the state of the heap before we
2730 * call the callback over each page that exists in this buffer. Thus it
2731 * is safe for the callback to allocate objects without possibly entering
2732 * an infinite loop. */
2733 struct heap_page *page = 0;
2734 size_t pages_count = 0;
2735 ccan_list_for_each(&heap->pages, page, page_node) {
2736 pages[pages_count] = page;
2737 pages_count++;
2738 }
2739 data->pages[i] = pages;
2740 data->pages_counts[i] = pages_count;
2741 GC_ASSERT(pages_count == heap->total_pages);
2742 }
2743
2744 for (int i = 0; i < HEAP_COUNT; i++) {
2745 rb_heap_t *heap = &heaps[i];
2746 size_t pages_count = data->pages_counts[i];
2747 struct heap_page **pages = data->pages[i];
2748
2749 struct heap_page *page = ccan_list_top(&heap->pages, struct heap_page, page_node);
2750 for (size_t i = 0; i < pages_count; i++) {
2751 /* If we have reached the end of the linked list then there are no
2752 * more pages, so break. */
2753 if (page == NULL) break;
2754
2755 /* If this page does not match the one in the buffer, then move to
2756 * the next page in the buffer. */
2757 if (pages[i] != page) continue;
2758
2759 uintptr_t pstart = (uintptr_t)page->start;
2760 uintptr_t pend = pstart + (page->total_slots * heap->slot_size);
2761
2762 if (data->each_obj_callback &&
2763 (*data->each_obj_callback)((void *)pstart, (void *)pend, heap->slot_size, data->data)) {
2764 break;
2765 }
2766 if (data->each_page_callback &&
2767 (*data->each_page_callback)(page, data->data)) {
2768 break;
2769 }
2770
2771 page = ccan_list_next(&heap->pages, page, page_node);
2772 }
2773 }
2774
2775 return Qnil;
2776}
2777
2778static void
2779objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
2780{
2781 /* Disable incremental GC */
2782     rb_objspace_t *objspace = each_obj_data->objspace;
2783     bool reenable_incremental = FALSE;
2784 if (protected) {
2785 reenable_incremental = !objspace->flags.dont_incremental;
2786
2787 gc_rest(objspace);
2788 objspace->flags.dont_incremental = TRUE;
2789 }
2790
2791 each_obj_data->reenable_incremental = reenable_incremental;
2792 memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
2793 memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
2794 rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
2795 objspace_each_objects_ensure, (VALUE)each_obj_data);
2796}
2797
2798static void
2799objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
2800{
2801 struct each_obj_data each_obj_data = {
2802 .objspace = objspace,
2803 .each_obj_callback = callback,
2804 .each_page_callback = NULL,
2805 .data = data,
2806 };
2807 objspace_each_exec(protected, &each_obj_data);
2808}
2809
2810void
2811rb_gc_impl_each_objects(void *objspace_ptr, each_obj_callback *callback, void *data)
2812{
2813 objspace_each_objects(objspace_ptr, callback, data, TRUE);
2814}
2815
2816#if GC_CAN_COMPILE_COMPACTION
2817static void
2818objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
2819{
2820 struct each_obj_data each_obj_data = {
2821 .objspace = objspace,
2822 .each_obj_callback = NULL,
2823 .each_page_callback = callback,
2824 .data = data,
2825 };
2826 objspace_each_exec(protected, &each_obj_data);
2827}
2828#endif
2829
2830VALUE
2831rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
2832{
2833 rb_objspace_t *objspace = objspace_ptr;
2834 VALUE table;
2835 st_data_t data;
2836
2837 GC_ASSERT(!OBJ_FROZEN(obj));
2838
2839 RBASIC(obj)->flags |= FL_FINALIZE;
2840
2841 unsigned int lev = RB_GC_VM_LOCK();
2842
2843 if (st_lookup(finalizer_table, obj, &data)) {
2844 table = (VALUE)data;
2845 VALUE dup_table = rb_ary_dup(table);
2846
2847 RB_GC_VM_UNLOCK(lev);
2848 /* avoid duplicate block, table is usually small */
2849 {
2850             long len = RARRAY_LEN(dup_table);
2851 long i;
2852
2853 for (i = 0; i < len; i++) {
2854 VALUE recv = RARRAY_AREF(dup_table, i);
2855 if (rb_equal(recv, block)) { // can't be called with VM lock held
2856 return recv;
2857 }
2858 }
2859 }
2860 lev = RB_GC_VM_LOCK();
2861 RB_GC_GUARD(dup_table);
2862
2863 rb_ary_push(table, block);
2864 }
2865 else {
2866 table = rb_ary_new3(2, rb_obj_id(obj), block);
2867 rb_obj_hide(table);
2868 st_add_direct(finalizer_table, obj, table);
2869 }
2870
2871 RB_GC_VM_UNLOCK(lev);
2872
2873 return block;
2874}
2875
2876void
2877rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
2878{
2879 rb_objspace_t *objspace = objspace_ptr;
2880
2881 GC_ASSERT(!OBJ_FROZEN(obj));
2882
2883 st_data_t data = obj;
2884
2885 int lev = RB_GC_VM_LOCK();
2886 st_delete(finalizer_table, &data, 0);
2887 RB_GC_VM_UNLOCK(lev);
2888
2889 FL_UNSET(obj, FL_FINALIZE);
2890}
2891
2892void
2893rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
2894{
2895 rb_objspace_t *objspace = objspace_ptr;
2896 VALUE table;
2897 st_data_t data;
2898
2899 if (!FL_TEST(obj, FL_FINALIZE)) return;
2900
2901 int lev = RB_GC_VM_LOCK();
2902 if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {
2903 table = rb_ary_dup((VALUE)data);
2904 RARRAY_ASET(table, 0, rb_obj_id(dest));
2905 st_insert(finalizer_table, dest, table);
2906 FL_SET(dest, FL_FINALIZE);
2907 }
2908 else {
2909 rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
2910 }
2911 RB_GC_VM_UNLOCK(lev);
2912}
2913
2914static VALUE
2915get_final(long i, void *data)
2916{
2917 VALUE table = (VALUE)data;
2918
2919 return RARRAY_AREF(table, i + 1);
2920}
2921
2922static unsigned int
2923run_final(rb_objspace_t *objspace, VALUE zombie, unsigned int lev)
2924{
2925 if (RZOMBIE(zombie)->dfree) {
2926 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
2927 }
2928
2929 st_data_t key = (st_data_t)zombie;
2930 if (FL_TEST_RAW(zombie, FL_FINALIZE)) {
2931 FL_UNSET(zombie, FL_FINALIZE);
2932 st_data_t table;
2933 if (st_delete(finalizer_table, &key, &table)) {
2934 RB_GC_VM_UNLOCK(lev);
2935 rb_gc_run_obj_finalizer(RARRAY_AREF(table, 0), RARRAY_LEN(table) - 1, get_final, (void *)table);
2936 lev = RB_GC_VM_LOCK();
2937 }
2938 else {
2939 rb_bug("FL_FINALIZE flag is set, but finalizers are not found");
2940 }
2941 }
2942 else {
2943 GC_ASSERT(!st_lookup(finalizer_table, key, NULL));
2944 }
2945 return lev;
2946}
2947
2948static void
2949finalize_list(rb_objspace_t *objspace, VALUE zombie)
2950{
2951 while (zombie) {
2952 VALUE next_zombie;
2953 struct heap_page *page;
2954 rb_asan_unpoison_object(zombie, false);
2955 next_zombie = RZOMBIE(zombie)->next;
2956 page = GET_HEAP_PAGE(zombie);
2957
2958 unsigned int lev = RB_GC_VM_LOCK();
2959
2960 lev = run_final(objspace, zombie, lev);
2961 {
2962 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
2963 GC_ASSERT(page->heap->final_slots_count > 0);
2964 GC_ASSERT(page->final_slots > 0);
2965
2966 page->heap->final_slots_count--;
2967 page->final_slots--;
2968 page->free_slots++;
2969 RVALUE_AGE_SET_BITMAP(zombie, 0);
2970 heap_page_add_freeobj(objspace, page, zombie);
2971 page->heap->total_freed_objects++;
2972 }
2973 RB_GC_VM_UNLOCK(lev);
2974
2975 zombie = next_zombie;
2976 }
2977}
2978
2979static void
2980finalize_deferred_heap_pages(rb_objspace_t *objspace)
2981{
2982 VALUE zombie;
2983 while ((zombie = RUBY_ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
2984 finalize_list(objspace, zombie);
2985 }
2986}
2987
2988static void
2989finalize_deferred(rb_objspace_t *objspace)
2990{
2991 rb_gc_set_pending_interrupt();
2992 finalize_deferred_heap_pages(objspace);
2993 rb_gc_unset_pending_interrupt();
2994}
2995
2996static void
2997gc_finalize_deferred(void *dmy)
2998{
2999 rb_objspace_t *objspace = dmy;
3000 if (RUBY_ATOMIC_EXCHANGE(finalizing, 1)) return;
3001
3002 finalize_deferred(objspace);
3003 RUBY_ATOMIC_SET(finalizing, 0);
3004}
3005
3006static void
3007gc_finalize_deferred_register(rb_objspace_t *objspace)
3008{
3009 /* will enqueue a call to gc_finalize_deferred */
3010 rb_postponed_job_trigger(objspace->finalize_deferred_pjob);
3011}
3012
3013static int pop_mark_stack(mark_stack_t *stack, VALUE *data);
3014
3015static void
3016gc_abort(void *objspace_ptr)
3017{
3018 rb_objspace_t *objspace = objspace_ptr;
3019
3020 if (is_incremental_marking(objspace)) {
3021 /* Remove all objects from the mark stack. */
3022 VALUE obj;
3023 while (pop_mark_stack(&objspace->mark_stack, &obj));
3024
3025 objspace->flags.during_incremental_marking = FALSE;
3026 }
3027
3028 if (is_lazy_sweeping(objspace)) {
3029 objspace->sweeping_heap_count = 0;
3030 for (int i = 0; i < HEAP_COUNT; i++) {
3031 rb_heap_t *heap = &heaps[i];
3032
3033 heap->sweeping_page = NULL;
3034 struct heap_page *page = NULL;
3035
3036 ccan_list_for_each(&heap->pages, page, page_node) {
3037 page->flags.before_sweep = false;
3038 }
3039 }
3040 }
3041
3042 for (int i = 0; i < HEAP_COUNT; i++) {
3043 rb_heap_t *heap = &heaps[i];
3044 rgengc_mark_and_rememberset_clear(objspace, heap);
3045 }
3046
3047 gc_mode_set(objspace, gc_mode_none);
3048}
3049
3050void
3051rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
3052{
3053 rb_objspace_t *objspace = objspace_ptr;
3054
3055 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3056 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3057 short stride = page->slot_size;
3058
3059 uintptr_t p = (uintptr_t)page->start;
3060 uintptr_t pend = p + page->total_slots * stride;
3061 for (; p < pend; p += stride) {
3062 VALUE vp = (VALUE)p;
3063 asan_unpoisoning_object(vp) {
3064 if (RB_BUILTIN_TYPE(vp) != T_NONE) {
3065 rb_gc_obj_free_vm_weak_references(vp);
3066 if (rb_gc_obj_free(objspace, vp)) {
3067 RBASIC(vp)->flags = 0;
3068 }
3069 }
3070 }
3071 }
3072 }
3073}
3074
3075static int
3076rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t _data)
3077{
3078 VALUE obj = (VALUE)key;
3079 VALUE table = (VALUE)val;
3080
3081 GC_ASSERT(RB_FL_TEST(obj, FL_FINALIZE));
3082 GC_ASSERT(RB_BUILTIN_TYPE(val) == T_ARRAY);
3083
3084 rb_gc_run_obj_finalizer(RARRAY_AREF(table, 0), RARRAY_LEN(table) - 1, get_final, (void *)table);
3085
3086 FL_UNSET(obj, FL_FINALIZE);
3087
3088 return ST_DELETE;
3089}
3090
3091void
3092rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
3093{
3094 rb_objspace_t *objspace = objspace_ptr;
3095
3096#if RGENGC_CHECK_MODE >= 2
3097 gc_verify_internal_consistency(objspace);
3098#endif
3099
3100 /* prohibit incremental GC */
3101 objspace->flags.dont_incremental = 1;
3102
3103 if (RUBY_ATOMIC_EXCHANGE(finalizing, 1)) {
3104 /* Abort incremental marking and lazy sweeping to speed up shutdown. */
3105 gc_abort(objspace);
3106 dont_gc_on();
3107 return;
3108 }
3109
3110 while (finalizer_table->num_entries) {
3111 st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, 0);
3112 }
3113
3114 /* run finalizers */
3115 finalize_deferred(objspace);
3116 GC_ASSERT(heap_pages_deferred_final == 0);
3117
3118 /* Abort incremental marking and lazy sweeping to speed up shutdown. */
3119 gc_abort(objspace);
3120
3121     /* prohibit GC because forcibly running T_DATA finalizers can break object graph consistency */
3122 dont_gc_on();
3123
3124     /* running data/file finalizers is part of garbage collection */
3125 unsigned int lock_lev;
3126 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
3127
3128 /* run data/file object's finalizers */
3129 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3130 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3131 short stride = page->slot_size;
3132
3133 uintptr_t p = (uintptr_t)page->start;
3134 uintptr_t pend = p + page->total_slots * stride;
3135 for (; p < pend; p += stride) {
3136 VALUE vp = (VALUE)p;
3137 asan_unpoisoning_object(vp) {
3138 if (rb_gc_shutdown_call_finalizer_p(vp)) {
3139 rb_gc_obj_free_vm_weak_references(vp);
3140 if (rb_gc_obj_free(objspace, vp)) {
3141 RBASIC(vp)->flags = 0;
3142 }
3143 }
3144 }
3145 }
3146 }
3147
3148 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
3149
3150 finalize_deferred_heap_pages(objspace);
3151
3152 st_free_table(finalizer_table);
3153 finalizer_table = 0;
3154 RUBY_ATOMIC_SET(finalizing, 0);
3155}
3156
3157void
3158rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data)
3159{
3160 rb_objspace_t *objspace = objspace_ptr;
3161
3162 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3163 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3164 short stride = page->slot_size;
3165
3166 uintptr_t p = (uintptr_t)page->start;
3167 uintptr_t pend = p + page->total_slots * stride;
3168 for (; p < pend; p += stride) {
3169 VALUE obj = (VALUE)p;
3170
3171 asan_unpoisoning_object(obj) {
3172 func(obj, data);
3173 }
3174 }
3175 }
3176}
3177
3178/*
3179 ------------------------ Garbage Collection ------------------------
3180*/
3181
3182/* Sweeping */
3183
3184static size_t
3185objspace_available_slots(rb_objspace_t *objspace)
3186{
3187 size_t total_slots = 0;
3188 for (int i = 0; i < HEAP_COUNT; i++) {
3189 rb_heap_t *heap = &heaps[i];
3190 total_slots += heap->total_slots;
3191 }
3192 return total_slots;
3193}
3194
3195static size_t
3196objspace_live_slots(rb_objspace_t *objspace)
3197{
3198 return total_allocated_objects(objspace) - total_freed_objects(objspace) - total_final_slots_count(objspace);
3199}
3200
3201static size_t
3202objspace_free_slots(rb_objspace_t *objspace)
3203{
3204 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - total_final_slots_count(objspace);
3205}
3206
3207static void
3208gc_setup_mark_bits(struct heap_page *page)
3209{
3210 /* copy oldgen bitmap to mark bitmap */
3211 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
3212}
3213
3214static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
3215static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, struct heap_page *src_page, struct heap_page *dest_page);
3216
3217#if defined(_WIN32)
3218enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
3219
3220static BOOL
3221protect_page_body(struct heap_page_body *body, DWORD protect)
3222{
3223 DWORD old_protect;
3224 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
3225}
3226#elif defined(__wasi__)
3227// wasi-libc's mprotect emulation does not support PROT_NONE
3228enum {HEAP_PAGE_LOCK, HEAP_PAGE_UNLOCK};
3229#define protect_page_body(body, protect) 1
3230#else
3231enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
3232#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
3233#endif
3234
3235static void
3236lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
3237{
3238 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
3239 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
3240 }
3241 else {
3242 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
3243 }
3244}
3245
3246static void
3247unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
3248{
3249 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
3250 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
3251 }
3252 else {
3253 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
3254 }
3255}
3256
3257static bool
3258try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
3259{
3260 GC_ASSERT(gc_is_moveable_obj(objspace, src));
3261
3262 struct heap_page *src_page = GET_HEAP_PAGE(src);
3263 if (!free_page) {
3264 return false;
3265 }
3266
3267 /* We should return true if either src is successfully moved, or src is
3268 * unmoveable. A false return will cause the sweeping cursor to be
3269 * incremented to the next page, and src will attempt to move again */
3270 GC_ASSERT(RVALUE_MARKED(objspace, src));
3271
3272 asan_unlock_freelist(free_page);
3273 VALUE dest = (VALUE)free_page->freelist;
3274 asan_lock_freelist(free_page);
3275 if (dest) {
3276 rb_asan_unpoison_object(dest, false);
3277 }
3278 else {
3279 /* if we can't get something from the freelist then the page must be
3280 * full */
3281 return false;
3282 }
3283 asan_unlock_freelist(free_page);
3284 free_page->freelist = ((struct free_slot *)dest)->next;
3285 asan_lock_freelist(free_page);
3286
3287 GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
3288
3289 if (src_page->slot_size > free_page->slot_size) {
3290 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
3291 }
3292 else if (free_page->slot_size > src_page->slot_size) {
3293 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
3294 }
3295 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
3296 objspace->rcompactor.total_moved++;
3297
3298 gc_move(objspace, src, dest, src_page, free_page);
3299 gc_pin(objspace, src);
3300 free_page->free_slots--;
3301
3302 return true;
3303}
3304
3305static void
3306gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
3307{
3308 struct heap_page *cursor = heap->compact_cursor;
3309
3310 while (cursor) {
3311 unlock_page_body(objspace, cursor->body);
3312 cursor = ccan_list_next(&heap->pages, cursor, page_node);
3313 }
3314}
3315
3316static void gc_update_references(rb_objspace_t *objspace);
3317#if GC_CAN_COMPILE_COMPACTION
3318static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
3319#endif
3320
3321#if defined(__MINGW32__) || defined(_WIN32)
3322# define GC_COMPACTION_SUPPORTED 1
3323#else
3324 /* If we are not on MinGW or Windows and do not have mmap, we cannot use
3325  * mprotect for the read barrier, so we must disable compaction. */
3326# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
3327#endif
3328
3329#if GC_CAN_COMPILE_COMPACTION
3330static void
3331read_barrier_handler(uintptr_t address)
3332{
3333 rb_objspace_t *objspace = (rb_objspace_t *)rb_gc_get_objspace();
3334
3335 struct heap_page_body *page_body = GET_PAGE_BODY(address);
3336
3337 /* If the page_body is NULL, then mprotect cannot handle it and will crash
3338 * with "Cannot allocate memory". */
3339 if (page_body == NULL) {
3340 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)address);
3341 }
3342
3343 int lev = RB_GC_VM_LOCK();
3344 {
3345 unlock_page_body(objspace, page_body);
3346
3347 objspace->profile.read_barrier_faults++;
3348
3349 invalidate_moved_page(objspace, GET_HEAP_PAGE(address));
3350 }
3351 RB_GC_VM_UNLOCK(lev);
3352}
3353#endif
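
/* The function above is the compaction read barrier: while compaction is
 * running, moved-from pages are mprotect-ed to PROT_NONE, so any stray read
 * of an old address faults, is caught by the SIGSEGV/SIGBUS plumbing below,
 * and ends up here, where the page is unprotected and every T_MOVED slot on
 * it is invalidated (moved back) before execution resumes. */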
3354
3355#if !GC_CAN_COMPILE_COMPACTION
3356static void
3357uninstall_handlers(void)
3358{
3359 /* no-op */
3360}
3361
3362static void
3363install_handlers(void)
3364{
3365 /* no-op */
3366}
3367#elif defined(_WIN32)
3368static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
3369typedef void (*signal_handler)(int);
3370static signal_handler old_sigsegv_handler;
3371
3372static LONG WINAPI
3373read_barrier_signal(EXCEPTION_POINTERS *info)
3374{
3375 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
3376 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
3377 /* > The second array element specifies the virtual address of the inaccessible data.
3378 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
3379 *
3380 * Use this address to invalidate the page */
3381 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
3382 return EXCEPTION_CONTINUE_EXECUTION;
3383 }
3384 else {
3385 return EXCEPTION_CONTINUE_SEARCH;
3386 }
3387}
3388
3389static void
3390uninstall_handlers(void)
3391{
3392 signal(SIGSEGV, old_sigsegv_handler);
3393 SetUnhandledExceptionFilter(old_handler);
3394}
3395
3396static void
3397install_handlers(void)
3398{
3399 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
3400 old_sigsegv_handler = signal(SIGSEGV, NULL);
3401 /* Unhandled Exception Filter has access to the violation address similar
3402 * to si_addr from sigaction */
3403 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
3404}
3405#else
3406static struct sigaction old_sigbus_handler;
3407static struct sigaction old_sigsegv_handler;
3408
3409#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3410static exception_mask_t old_exception_masks[32];
3411static mach_port_t old_exception_ports[32];
3412static exception_behavior_t old_exception_behaviors[32];
3413static thread_state_flavor_t old_exception_flavors[32];
3414static mach_msg_type_number_t old_exception_count;
3415
3416static void
3417disable_mach_bad_access_exc(void)
3418{
3419 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
3420 task_swap_exception_ports(
3421 mach_task_self(), EXC_MASK_BAD_ACCESS,
3422 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
3423 old_exception_masks, &old_exception_count,
3424 old_exception_ports, old_exception_behaviors, old_exception_flavors
3425 );
3426}
3427
3428static void
3429restore_mach_bad_access_exc(void)
3430{
3431 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
3432 task_set_exception_ports(
3433 mach_task_self(),
3434 old_exception_masks[i], old_exception_ports[i],
3435 old_exception_behaviors[i], old_exception_flavors[i]
3436 );
3437 }
3438}
3439#endif
3440
3441static void
3442read_barrier_signal(int sig, siginfo_t *info, void *data)
3443{
3444 // setup SEGV/BUS handlers for errors
3445 struct sigaction prev_sigbus, prev_sigsegv;
3446 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
3447 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
3448
3449 // enable SIGBUS/SEGV
3450 sigset_t set, prev_set;
3451 sigemptyset(&set);
3452 sigaddset(&set, SIGBUS);
3453 sigaddset(&set, SIGSEGV);
3454 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
3455#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3456 disable_mach_bad_access_exc();
3457#endif
3458 // run handler
3459 read_barrier_handler((uintptr_t)info->si_addr);
3460
3461 // reset SEGV/BUS handlers
3462#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3463 restore_mach_bad_access_exc();
3464#endif
3465 sigaction(SIGBUS, &prev_sigbus, NULL);
3466 sigaction(SIGSEGV, &prev_sigsegv, NULL);
3467 sigprocmask(SIG_SETMASK, &prev_set, NULL);
3468}
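
/* Note the ordering above: the previous SIGBUS/SIGSEGV handlers are restored
 * and the signals unblocked *before* read_barrier_handler runs, so a genuine
 * crash inside the handler (a fault on memory the GC does not own) is
 * reported by the original handlers instead of recursing into this one. */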
3469
3470static void
3471uninstall_handlers(void)
3472{
3473#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3474 restore_mach_bad_access_exc();
3475#endif
3476 sigaction(SIGBUS, &old_sigbus_handler, NULL);
3477 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
3478}
3479
3480static void
3481install_handlers(void)
3482{
3483 struct sigaction action;
3484 memset(&action, 0, sizeof(struct sigaction));
3485 sigemptyset(&action.sa_mask);
3486 action.sa_sigaction = read_barrier_signal;
3487 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
3488
3489 sigaction(SIGBUS, &action, &old_sigbus_handler);
3490 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
3491#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3492 disable_mach_bad_access_exc();
3493#endif
3494}
3495#endif
3496
3497static void
3498gc_compact_finish(rb_objspace_t *objspace)
3499{
3500 for (int i = 0; i < HEAP_COUNT; i++) {
3501 rb_heap_t *heap = &heaps[i];
3502 gc_unprotect_pages(objspace, heap);
3503 }
3504
3505 uninstall_handlers();
3506
3507 gc_update_references(objspace);
3508 objspace->profile.compact_count++;
3509
3510 for (int i = 0; i < HEAP_COUNT; i++) {
3511 rb_heap_t *heap = &heaps[i];
3512 heap->compact_cursor = NULL;
3513 heap->free_pages = NULL;
3514 heap->compact_cursor_index = 0;
3515 }
3516
3517 if (gc_prof_enabled(objspace)) {
3518 gc_profile_record *record = gc_prof_record(objspace);
3519 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
3520 }
3521 objspace->flags.during_compacting = FALSE;
3522}
3523
3524 struct gc_sweep_context {
3525     struct heap_page *page;
3526 int final_slots;
3527 int freed_slots;
3528 int empty_slots;
3529};
3530
3531static inline void
3532gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
3533{
3534 struct heap_page *sweep_page = ctx->page;
3535 short slot_size = sweep_page->slot_size;
3536
3537 do {
3538 VALUE vp = (VALUE)p;
3539 GC_ASSERT(vp % sizeof(VALUE) == 0);
3540
3541 rb_asan_unpoison_object(vp, false);
3542 if (bitset & 1) {
3543 switch (BUILTIN_TYPE(vp)) {
3544 case T_MOVED:
3545 if (objspace->flags.during_compacting) {
3546 /* The sweep cursor shouldn't have made it to any
3547 * T_MOVED slots while the compact flag is enabled.
3548 * The sweep cursor and compact cursor move in
3549                      * opposite directions, and when they meet, references will
3550                      * get updated and "during_compacting" will be disabled. */
3551 rb_bug("T_MOVED shouldn't be seen until compaction is finished");
3552 }
3553 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3554 ctx->empty_slots++;
3555 heap_page_add_freeobj(objspace, sweep_page, vp);
3556 break;
3557 case T_ZOMBIE:
3558 /* already counted */
3559 break;
3560 case T_NONE:
3561 ctx->empty_slots++; /* already freed */
3562 break;
3563
3564 default:
3565#if RGENGC_CHECK_MODE
3566 if (!is_full_marking(objspace)) {
3567 if (RVALUE_OLD_P(objspace, vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
3568 if (RVALUE_REMEMBERED(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
3569 }
3570#endif
3571
3572#if RGENGC_CHECK_MODE
3573#define CHECK(x) if (x(objspace, vp) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", rb_obj_info(vp))
3574 CHECK(RVALUE_WB_UNPROTECTED);
3575 CHECK(RVALUE_MARKED);
3576 CHECK(RVALUE_MARKING);
3577 CHECK(RVALUE_UNCOLLECTIBLE);
3578#undef CHECK
3579#endif
3580
3581 if (!rb_gc_obj_needs_cleanup_p(vp)) {
3582 if (RB_UNLIKELY(objspace->hook_events & RUBY_INTERNAL_EVENT_FREEOBJ)) {
3583 rb_gc_event_hook(vp, RUBY_INTERNAL_EVENT_FREEOBJ);
3584 }
3585
3586 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, slot_size);
3587 heap_page_add_freeobj(objspace, sweep_page, vp);
3588 gc_report(3, objspace, "page_sweep: %s (fast path) added to freelist\n", rb_obj_info(vp));
3589 ctx->freed_slots++;
3590 }
3591 else {
3592 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
3593
3594 rb_gc_event_hook(vp, RUBY_INTERNAL_EVENT_FREEOBJ);
3595
3596 rb_gc_obj_free_vm_weak_references(vp);
3597 if (rb_gc_obj_free(objspace, vp)) {
3598 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, slot_size);
3599 heap_page_add_freeobj(objspace, sweep_page, vp);
3600 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3601 ctx->freed_slots++;
3602 }
3603 else {
3604 ctx->final_slots++;
3605 }
3606 }
3607 break;
3608 }
3609 }
3610 p += slot_size;
3611 bitset >>= 1;
3612 } while (bitset);
3613}
3614
3615static inline void
3616gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
3617{
3618 struct heap_page *sweep_page = ctx->page;
3619 GC_ASSERT(sweep_page->heap == heap);
3620
3621 uintptr_t p;
3622 bits_t *bits, bitset;
3623
3624 gc_report(2, objspace, "page_sweep: start.\n");
3625
3626#if RGENGC_CHECK_MODE
3627 if (!objspace->flags.immediate_sweep) {
3628 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
3629 }
3630#endif
3631 sweep_page->flags.before_sweep = FALSE;
3632 sweep_page->free_slots = 0;
3633
3634 p = (uintptr_t)sweep_page->start;
3635 bits = sweep_page->mark_bits;
3636 short slot_size = sweep_page->slot_size;
3637 int total_slots = sweep_page->total_slots;
3638 int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
3639
3640 int out_of_range_bits = total_slots % BITS_BITLENGTH;
3641 if (out_of_range_bits != 0) {
3642 bits[bitmap_plane_count - 1] |= ~(((bits_t)1 << out_of_range_bits) - 1);
3643 }
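
    /* Example, assuming BITS_BITLENGTH == 64 and total_slots == 70: the last
     * bitmap plane only covers slots 64..69, so out_of_range_bits == 6 and
     * the mask ~(((bits_t)1 << 6) - 1) force-"marks" bits 6..63. Because the
     * sweep below walks ~bits[i], those phantom marks make the sweeper skip
     * the nonexistent slots past the end of the page. */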
3644
3645 // Clear wb_unprotected and age bits for all unmarked slots
3646 {
3647 bits_t *wb_unprotected_bits = sweep_page->wb_unprotected_bits;
3648 bits_t *age_bits = sweep_page->age_bits;
3649 for (int i = 0; i < bitmap_plane_count; i++) {
3650 bits_t unmarked = ~bits[i];
3651 wb_unprotected_bits[i] &= ~unmarked;
3652 age_bits[i * 2] &= ~unmarked;
3653 age_bits[i * 2 + 1] &= ~unmarked;
3654 }
3655 }
3656
3657 for (int i = 0; i < bitmap_plane_count; i++) {
3658 bitset = ~bits[i];
3659 if (bitset) {
3660 gc_sweep_plane(objspace, heap, p, bitset, ctx);
3661 }
3662 p += BITS_BITLENGTH * slot_size;
3663 }
3664
3665 if (!heap->compact_cursor) {
3666 gc_setup_mark_bits(sweep_page);
3667 }
3668
3669#if GC_PROFILE_MORE_DETAIL
3670 if (gc_prof_enabled(objspace)) {
3671 gc_profile_record *record = gc_prof_record(objspace);
3672 record->removing_objects += ctx->final_slots + ctx->freed_slots;
3673 record->empty_objects += ctx->empty_slots;
3674 }
3675#endif
3676 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3677 rb_gc_count(),
3678 sweep_page->total_slots,
3679 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
3680
3681 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
3682 sweep_page->heap->total_freed_objects += ctx->freed_slots;
3683
3684 if (heap_pages_deferred_final && !finalizing) {
3685 gc_finalize_deferred_register(objspace);
3686 }
3687
3688#if RGENGC_CHECK_MODE
3689 short freelist_len = 0;
3690 asan_unlock_freelist(sweep_page);
3691 struct free_slot *ptr = sweep_page->freelist;
3692 while (ptr) {
3693 freelist_len++;
3694 rb_asan_unpoison_object((VALUE)ptr, false);
3695 struct free_slot *next = ptr->next;
3696 rb_asan_poison_object((VALUE)ptr);
3697 ptr = next;
3698 }
3699 asan_lock_freelist(sweep_page);
3700 if (freelist_len != sweep_page->free_slots) {
3701 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
3702 }
3703#endif
3704
3705 gc_report(2, objspace, "page_sweep: end.\n");
3706}
3707
3708static const char *
3709gc_mode_name(enum gc_mode mode)
3710{
3711 switch (mode) {
3712 case gc_mode_none: return "none";
3713 case gc_mode_marking: return "marking";
3714 case gc_mode_sweeping: return "sweeping";
3715 case gc_mode_compacting: return "compacting";
3716 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
3717 }
3718}
3719
3720static void
3721gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
3722{
3723#if RGENGC_CHECK_MODE
3724 enum gc_mode prev_mode = gc_mode(objspace);
3725 switch (prev_mode) {
3726 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
3727 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
3728 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
3729 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
3730 }
3731#endif
3732 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
3733 gc_mode_set(objspace, mode);
3734}
3735
3736static void
3737heap_page_freelist_append(struct heap_page *page, struct free_slot *freelist)
3738{
3739 if (freelist) {
3740 asan_unlock_freelist(page);
3741 if (page->freelist) {
3742 struct free_slot *p = page->freelist;
3743 rb_asan_unpoison_object((VALUE)p, false);
3744 while (p->next) {
3745 struct free_slot *prev = p;
3746 p = p->next;
3747 rb_asan_poison_object((VALUE)prev);
3748 rb_asan_unpoison_object((VALUE)p, false);
3749 }
3750 p->next = freelist;
3751 rb_asan_poison_object((VALUE)p);
3752 }
3753 else {
3754 page->freelist = freelist;
3755 }
3756 asan_lock_freelist(page);
3757 }
3758}
3759
3760static void
3761gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3762{
3763 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
3764 if (heap->sweeping_page) {
3765 objspace->sweeping_heap_count++;
3766 }
3767 heap->free_pages = NULL;
3768 heap->pooled_pages = NULL;
3769 if (!objspace->flags.immediate_sweep) {
3770 struct heap_page *page = NULL;
3771
3772 ccan_list_for_each(&heap->pages, page, page_node) {
3773 page->flags.before_sweep = TRUE;
3774 }
3775 }
3776}
3777
3778#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
3779__attribute__((noinline))
3780#endif
3781
3782#if GC_CAN_COMPILE_COMPACTION
3783static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
3784static int compare_pinned_slots(const void *left, const void *right, void *d);
3785#endif
3786
3787static void
3788gc_ractor_newobj_cache_clear(void *c, void *data)
3789{
3790 rb_objspace_t *objspace = rb_gc_get_objspace();
3791 rb_ractor_newobj_cache_t *newobj_cache = c;
3792
3793 newobj_cache->incremental_mark_step_allocated_slots = 0;
3794
3795 for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
3796
3797 rb_ractor_newobj_heap_cache_t *cache = &newobj_cache->heap_caches[heap_idx];
3798
3799 rb_heap_t *heap = &heaps[heap_idx];
3800 RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, cache->allocated_objects_count);
3801 cache->allocated_objects_count = 0;
3802
3803 struct heap_page *page = cache->using_page;
3804 struct free_slot *freelist = cache->freelist;
3805 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
3806
3807 heap_page_freelist_append(page, freelist);
3808
3809 cache->using_page = NULL;
3810 cache->freelist = NULL;
3811 }
3812}
3813
3814static void
3815gc_sweep_start(rb_objspace_t *objspace)
3816{
3817 gc_mode_transition(objspace, gc_mode_sweeping);
3818 objspace->rincgc.pooled_slots = 0;
3819
3820#if GC_CAN_COMPILE_COMPACTION
3821 if (objspace->flags.during_compacting) {
3822 gc_sort_heap_by_compare_func(
3823 objspace,
3824 objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
3825 );
3826 }
3827#endif
3828
3829 for (int i = 0; i < HEAP_COUNT; i++) {
3830 rb_heap_t *heap = &heaps[i];
3831 gc_sweep_start_heap(objspace, heap);
3832
3833 /* We should call gc_sweep_finish_heap for heaps with no pages. */

3834 if (heap->sweeping_page == NULL) {
3835 GC_ASSERT(heap->total_pages == 0);
3836 GC_ASSERT(heap->total_slots == 0);
3837 gc_sweep_finish_heap(objspace, heap);
3838 }
3839 }
3840
3841 rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
3842}
3843
3844static void
3845gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3846{
3847 size_t total_slots = heap->total_slots;
3848 size_t swept_slots = heap->freed_slots + heap->empty_slots;
3849
3850 size_t init_slots = gc_params.heap_init_bytes / heap->slot_size;
3851 size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
3852
3853 if (swept_slots < min_free_slots &&
3854 /* The heap is a growth heap if it freed more slots than it had empty slots. */
3855 ((heap->empty_slots == 0 && total_slots > 0) || heap->freed_slots > heap->empty_slots)) {
3856 /* If we don't have enough slots and we have pages on the tomb heap, move
3857 * pages from the tomb heap to the eden heap. This may prevent page
3858 * creation thrashing (frequently allocating and deallocating pages) and
3859 * GC thrashing (running GC more frequently than required). */
3860 struct heap_page *resurrected_page;
3861 while (swept_slots < min_free_slots &&
3862 (resurrected_page = heap_page_resurrect(objspace))) {
3863 heap_add_page(objspace, heap, resurrected_page);
3864 heap_add_freepage(heap, resurrected_page);
3865
3866 swept_slots += resurrected_page->free_slots;
3867 }
3868
3869 if (swept_slots < min_free_slots) {
3870 /* Grow this heap if we are in a major GC or if we haven't run at least
3871 * RVALUE_OLD_AGE minor GCs since the last major GC. */
3872 if (is_full_marking(objspace) ||
3873 objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
3874 if (objspace->heap_pages.allocatable_bytes < min_free_slots * heap->slot_size) {
3875 heap_allocatable_bytes_expand(objspace, heap, swept_slots, heap->total_slots, heap->slot_size);
3876 }
3877 }
3878 else if (swept_slots < min_free_slots * 7 / 8 &&
3879 objspace->heap_pages.allocatable_bytes < (min_free_slots * 7 / 8 - swept_slots) * heap->slot_size) {
3880 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
3881 heap->force_major_gc_count++;
3882 }
3883 }
3884 }
3885}
3886
3887static void
3888gc_sweep_finish(rb_objspace_t *objspace)
3889{
3890 gc_report(1, objspace, "gc_sweep_finish\n");
3891
3892 gc_prof_set_heap_info(objspace);
3893 heap_pages_free_unused_pages(objspace);
3894
3895 for (int i = 0; i < HEAP_COUNT; i++) {
3896 rb_heap_t *heap = &heaps[i];
3897
3898 heap->freed_slots = 0;
3899 heap->empty_slots = 0;
3900
3901 if (!will_be_incremental_marking(objspace)) {
3902 struct heap_page *end_page = heap->free_pages;
3903 if (end_page) {
3904 while (end_page->free_next) end_page = end_page->free_next;
3905 end_page->free_next = heap->pooled_pages;
3906 }
3907 else {
3908 heap->free_pages = heap->pooled_pages;
3909 }
3910 heap->pooled_pages = NULL;
3911 objspace->rincgc.pooled_slots = 0;
3912 }
3913 }
3914
3915 rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_END_SWEEP);
3916 gc_mode_transition(objspace, gc_mode_none);
3917
3918#if RGENGC_CHECK_MODE >= 2
3919 gc_verify_internal_consistency(objspace);
3920#endif
3921}
3922
3923static int
3924gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
3925{
3926 struct heap_page *sweep_page = heap->sweeping_page;
3927 int swept_slots = 0;
3928 int pooled_slots = 0;
3929 int sweep_budget = GC_INCREMENTAL_SWEEP_BYTES / heap->slot_size;
3930 int pool_budget = GC_INCREMENTAL_SWEEP_POOL_BYTES / heap->slot_size;
3931
3932 if (sweep_page == NULL) return FALSE;
3933
3934#if GC_ENABLE_LAZY_SWEEP
3935 gc_prof_sweep_timer_start(objspace);
3936#endif
3937
3938 do {
3939 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
3940
3941 struct gc_sweep_context ctx = {
3942 .page = sweep_page,
3943 .final_slots = 0,
3944 .freed_slots = 0,
3945 .empty_slots = 0,
3946 };
3947 gc_sweep_page(objspace, heap, &ctx);
3948 int free_slots = ctx.freed_slots + ctx.empty_slots;
3949
3950 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
3951
3952 if (free_slots == sweep_page->total_slots) {
3953 /* There are no living objects, so move this page to the global empty pages. */
3954 heap_unlink_page(objspace, heap, sweep_page);
3955
3956 sweep_page->start = 0;
3957 sweep_page->total_slots = 0;
3958 sweep_page->slot_size = 0;
3959 sweep_page->heap = NULL;
3960 sweep_page->free_slots = 0;
3961
3962 asan_unlock_freelist(sweep_page);
3963 sweep_page->freelist = NULL;
3964 asan_lock_freelist(sweep_page);
3965
3966 asan_poison_memory_region(sweep_page->body, HEAP_PAGE_SIZE);
3967
3968 objspace->empty_pages_count++;
3969 sweep_page->free_next = objspace->empty_pages;
3970 objspace->empty_pages = sweep_page;
3971 }
3972 else if (free_slots > 0) {
3973 heap->freed_slots += ctx.freed_slots;
3974 heap->empty_slots += ctx.empty_slots;
3975
3976 if (pooled_slots < pool_budget) {
3977 heap_add_poolpage(objspace, heap, sweep_page);
3978 pooled_slots += free_slots;
3979 }
3980 else {
3981 heap_add_freepage(heap, sweep_page);
3982 swept_slots += free_slots;
3983 if (swept_slots > sweep_budget) {
3984 break;
3985 }
3986 }
3987 }
3988 else {
3989 sweep_page->free_next = NULL;
3990 }
3991 } while ((sweep_page = heap->sweeping_page));
3992
3993 if (!heap->sweeping_page) {
3994 objspace->sweeping_heap_count--;
3995 GC_ASSERT(objspace->sweeping_heap_count >= 0);
3996 gc_sweep_finish_heap(objspace, heap);
3997
3998 if (!has_sweeping_pages(objspace)) {
3999 gc_sweep_finish(objspace);
4000 }
4001 }
4002
4003#if GC_ENABLE_LAZY_SWEEP
4004 gc_prof_sweep_timer_stop(objspace);
4005#endif
4006
4007 return heap->free_pages != NULL;
4008}
4009
4010static void
4011gc_sweep_rest(rb_objspace_t *objspace)
4012{
4013 for (int i = 0; i < HEAP_COUNT; i++) {
4014 rb_heap_t *heap = &heaps[i];
4015
4016 while (heap->sweeping_page) {
4017 gc_sweep_step(objspace, heap);
4018 }
4019 }
4020}
4021
4022static void
4023gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap)
4024{
4025 GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
4026 if (!GC_ENABLE_LAZY_SWEEP) return;
4027
4028 gc_sweeping_enter(objspace);
4029
4030 for (int i = 0; i < HEAP_COUNT; i++) {
4031 rb_heap_t *heap = &heaps[i];
4032 if (gc_sweep_step(objspace, heap)) {
4033 GC_ASSERT(heap->free_pages != NULL);
4034 }
4035 else if (heap == sweep_heap) {
4036 if (objspace->empty_pages_count > 0 || objspace->heap_pages.allocatable_bytes > 0) {
4037 /* [Bug #21548]
4038 *
4039 * If this heap is the heap we want to sweep but we weren't able
4040 * to free any slots, and we either have empty pages or could
4041 * allocate new pages, then we want to preemptively claim a page.
4042 * This is because sweeping another heap may call
4043 * gc_sweep_finish_heap, which can use up all of the
4044 * empty/allocatable pages. If other heaps are not finished sweeping,
4045 * we do not finish this GC, and we would end up triggering a new
4046 * GC cycle during this GC phase. */
4047 heap_page_allocate_and_initialize(objspace, heap);
4048
4049 GC_ASSERT(heap->free_pages != NULL);
4050 }
4051 else {
4052 /* Not allowed to create a new page so finish sweeping. */
4053 gc_sweep_rest(objspace);
4054 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
4055 break;
4056 }
4057 }
4058 }
4059
4060 gc_sweeping_exit(objspace);
4061}
4062
4063VALUE
4064rb_gc_impl_location(void *objspace_ptr, VALUE value)
4065{
4066 VALUE destination;
4067
4068 asan_unpoisoning_object(value) {
4069 if (BUILTIN_TYPE(value) == T_MOVED) {
4070 destination = (VALUE)RMOVED(value)->destination;
4071 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
4072 }
4073 else {
4074 destination = value;
4075 }
4076 }
4077
4078 return destination;
4079}
4080
4081#if GC_CAN_COMPILE_COMPACTION
4082static void
4083invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
4084{
4085 if (bitset) {
4086 do {
4087 if (bitset & 1) {
4088 VALUE forwarding_object = (VALUE)p;
4089 VALUE object;
4090
4091 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
4092 GC_ASSERT(RVALUE_PINNED(objspace, forwarding_object));
4093 GC_ASSERT(!RVALUE_MARKED(objspace, forwarding_object));
4094
4095 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
4096
4097 object = rb_gc_impl_location(objspace, forwarding_object);
4098 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object), page);
4099 /* forwarding_object is now our actual object, and "object"
4100 * is the free slot for the original page */
4101
4102 struct heap_page *orig_page = GET_HEAP_PAGE(object);
4103 orig_page->free_slots++;
4104 RVALUE_AGE_SET_BITMAP(object, 0);
4105 heap_page_add_freeobj(objspace, orig_page, object);
4106
4107 GC_ASSERT(RVALUE_MARKED(objspace, forwarding_object));
4108 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
4109 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
4110 }
4111 }
4112 p += page->slot_size;
4113 bitset >>= 1;
4114 } while (bitset);
4115 }
4116}
4117
4118static void
4119invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
4120{
4121 int i;
4122 bits_t *mark_bits, *pin_bits;
4123 bits_t bitset;
4124 short slot_size = page->slot_size;
4125 int total_slots = page->total_slots;
4126 int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
4127
4128 mark_bits = page->mark_bits;
4129 pin_bits = page->pinned_bits;
4130
4131 uintptr_t p = page->start;
4132
4133 for (i=0; i < bitmap_plane_count; i++) {
4134 /* Moved objects are pinned but never marked. We reuse the pin bits
4135 * to indicate there is a moved object in this slot. */
4136 bitset = pin_bits[i] & ~mark_bits[i];
4137 invalidate_moved_plane(objspace, page, p, bitset);
4138 p += BITS_BITLENGTH * slot_size;
4139 }
4140}
4141#endif
4142
4143static void
4144gc_compact_start(rb_objspace_t *objspace)
4145{
4146 struct heap_page *page = NULL;
4147 gc_mode_transition(objspace, gc_mode_compacting);
4148
4149 for (int i = 0; i < HEAP_COUNT; i++) {
4150 rb_heap_t *heap = &heaps[i];
4151 ccan_list_for_each(&heap->pages, page, page_node) {
4152 page->flags.before_sweep = TRUE;
4153 }
4154
4155 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
4156 heap->compact_cursor_index = 0;
4157 }
4158
4159 if (gc_prof_enabled(objspace)) {
4160 gc_profile_record *record = gc_prof_record(objspace);
4161 record->moved_objects = objspace->rcompactor.total_moved;
4162 }
4163
4164 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
4165 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
4166 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
4167 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
4168
4169 /* Set up read barrier for pages containing MOVED objects */
4170 install_handlers();
4171}
4172
4173static void gc_sweep_compact(rb_objspace_t *objspace);
4174
4175static void
4176gc_sweep(rb_objspace_t *objspace)
4177{
4178 gc_sweeping_enter(objspace);
4179
4180 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4181
4182 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4183
4184 gc_sweep_start(objspace);
4185 if (objspace->flags.during_compacting) {
4186 gc_sweep_compact(objspace);
4187 }
4188
4189 if (immediate_sweep) {
4190#if !GC_ENABLE_LAZY_SWEEP
4191 gc_prof_sweep_timer_start(objspace);
4192#endif
4193 gc_sweep_rest(objspace);
4194#if !GC_ENABLE_LAZY_SWEEP
4195 gc_prof_sweep_timer_stop(objspace);
4196#endif
4197 }
4198 else {
4199
4200 /* Take one lazy sweep step in every heap. */
4201 for (int i = 0; i < HEAP_COUNT; i++) {
4202 rb_heap_t *heap = &heaps[i];
4203 gc_sweep_step(objspace, heap);
4204 }
4205 }
4206
4207 gc_sweeping_exit(objspace);
4208}
4209
4210/* Marking - Marking stack */
4211
4212static stack_chunk_t *
4213stack_chunk_alloc(void)
4214{
4215 stack_chunk_t *res;
4216
4217 res = malloc(sizeof(stack_chunk_t));
4218 if (!res)
4219 rb_memerror();
4220
4221 return res;
4222}
4223
4224static inline int
4225is_mark_stack_empty(mark_stack_t *stack)
4226{
4227 return stack->chunk == NULL;
4228}
4229
4230static size_t
4231mark_stack_size(mark_stack_t *stack)
4232{
4233 size_t size = stack->index;
4234 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4235
4236 while (chunk) {
4237 size += stack->limit;
4238 chunk = chunk->next;
4239 }
4240 return size;
4241}
4242
4243static void
4244add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
4245{
4246 chunk->next = stack->cache;
4247 stack->cache = chunk;
4248 stack->cache_size++;
4249}
4250
4251static void
4252shrink_stack_chunk_cache(mark_stack_t *stack)
4253{
4254 stack_chunk_t *chunk;
4255
4256 if (stack->unused_cache_size > (stack->cache_size/2)) {
4257 chunk = stack->cache;
4258 stack->cache = stack->cache->next;
4259 stack->cache_size--;
4260 free(chunk);
4261 }
4262 stack->unused_cache_size = stack->cache_size;
4263}
4264
4265static void
4266push_mark_stack_chunk(mark_stack_t *stack)
4267{
4268 stack_chunk_t *next;
4269
4270 GC_ASSERT(stack->index == stack->limit);
4271
4272 if (stack->cache_size > 0) {
4273 next = stack->cache;
4274 stack->cache = stack->cache->next;
4275 stack->cache_size--;
4276 if (stack->unused_cache_size > stack->cache_size)
4277 stack->unused_cache_size = stack->cache_size;
4278 }
4279 else {
4280 next = stack_chunk_alloc();
4281 }
4282 next->next = stack->chunk;
4283 stack->chunk = next;
4284 stack->index = 0;
4285}
4286
4287static void
4288pop_mark_stack_chunk(mark_stack_t *stack)
4289{
4290 stack_chunk_t *prev;
4291
4292 prev = stack->chunk->next;
4293 GC_ASSERT(stack->index == 0);
4294 add_stack_chunk_cache(stack, stack->chunk);
4295 stack->chunk = prev;
4296 stack->index = stack->limit;
4297}
4298
4299static void
4300mark_stack_chunk_list_free(stack_chunk_t *chunk)
4301{
4302 stack_chunk_t *next = NULL;
4303
4304 while (chunk != NULL) {
4305 next = chunk->next;
4306 free(chunk);
4307 chunk = next;
4308 }
4309}
4310
4311static void
4312free_stack_chunks(mark_stack_t *stack)
4313{
4314 mark_stack_chunk_list_free(stack->chunk);
4315}
4316
4317static void
4318mark_stack_free_cache(mark_stack_t *stack)
4319{
4320 mark_stack_chunk_list_free(stack->cache);
4321 stack->cache_size = 0;
4322 stack->unused_cache_size = 0;
4323}
4324
4325static void
4326push_mark_stack(mark_stack_t *stack, VALUE obj)
4327{
4328 switch (BUILTIN_TYPE(obj)) {
4329 case T_OBJECT:
4330 case T_CLASS:
4331 case T_MODULE:
4332 case T_FLOAT:
4333 case T_STRING:
4334 case T_REGEXP:
4335 case T_ARRAY:
4336 case T_HASH:
4337 case T_STRUCT:
4338 case T_BIGNUM:
4339 case T_FILE:
4340 case T_DATA:
4341 case T_MATCH:
4342 case T_COMPLEX:
4343 case T_RATIONAL:
4344 case T_TRUE:
4345 case T_FALSE:
4346 case T_SYMBOL:
4347 case T_IMEMO:
4348 case T_ICLASS:
4349 if (stack->index == stack->limit) {
4350 push_mark_stack_chunk(stack);
4351 }
4352 stack->chunk->data[stack->index++] = obj;
4353 return;
4354
4355 case T_NONE:
4356 case T_NIL:
4357 case T_FIXNUM:
4358 case T_MOVED:
4359 case T_ZOMBIE:
4360 case T_UNDEF:
4361 case T_MASK:
4362 rb_bug("push_mark_stack() called for broken object");
4363 break;
4364
4365 case T_NODE:
4366 rb_bug("push_mark_stack: unexpected T_NODE object");
4367 break;
4368 }
4369
4370 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
4371 BUILTIN_TYPE(obj), (void *)obj,
4372 is_pointer_to_heap((rb_objspace_t *)rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
4373}
4374
4375static int
4376pop_mark_stack(mark_stack_t *stack, VALUE *data)
4377{
4378 if (is_mark_stack_empty(stack)) {
4379 return FALSE;
4380 }
4381 if (stack->index == 1) {
4382 *data = stack->chunk->data[--stack->index];
4383 pop_mark_stack_chunk(stack);
4384 }
4385 else {
4386 *data = stack->chunk->data[--stack->index];
4387 }
4388 return TRUE;
4389}
4390
4391static void
4392init_mark_stack(mark_stack_t *stack)
4393{
4394 int i;
4395
4396 MEMZERO(stack, mark_stack_t, 1);
4397 stack->index = stack->limit = STACK_CHUNK_SIZE;
4398
4399 for (i=0; i < 4; i++) {
4400 add_stack_chunk_cache(stack, stack_chunk_alloc());
4401 }
4402 stack->unused_cache_size = stack->cache_size;
4403}
4404
4405/* Marking */
4406
4407static void
4408rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
4409{
4410 if (objspace->rgengc.parent_object_old_p) {
4411 if (RVALUE_WB_UNPROTECTED(objspace, obj) || !RVALUE_OLD_P(objspace, obj)) {
4412 rgengc_remember(objspace, objspace->rgengc.parent_object);
4413 }
4414 }
4415}
4416
4417static inline int
4418gc_mark_set(rb_objspace_t *objspace, VALUE obj)
4419{
4420 if (RVALUE_MARKED(objspace, obj)) return 0;
4421 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
4422 return 1;
4423}
4424
4425static void
4426gc_aging(rb_objspace_t *objspace, VALUE obj)
4427{
4428 /* Disable aging if major GCs are disabled. This prevents longer-lived
4429 * objects from filling up the heap, at the expense of marking many more
4430 * objects on each minor GC.
4431 *
4432 * We should always pre-warm our process when disabling majors, by running
4433 * GC manually several times so that most objects likely to become oldgen
4434 * are already oldgen. */
4435 if (!gc_config_full_mark_val)
4436 return;
4437
4438 struct heap_page *page = GET_HEAP_PAGE(obj);
4439
4440 GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
4441 check_rvalue_consistency(objspace, obj);
4442
4443 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
4444 if (!RVALUE_OLD_P(objspace, obj)) {
4445 int t = BUILTIN_TYPE(obj);
4446 if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
4447 gc_report(3, objspace, "gc_aging: YOUNG class: %s\n", rb_obj_info(obj));
4448 RVALUE_AGE_SET(obj, RVALUE_OLD_AGE);
4449 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
4450 }
4451 else {
4452 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", rb_obj_info(obj));
4453 RVALUE_AGE_INC(objspace, obj);
4454 }
4455 }
4456 else if (is_full_marking(objspace)) {
4457 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
4458 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
4459 }
4460 }
4461 check_rvalue_consistency(objspace, obj);
4462
4463 objspace->marked_slots++;
4464}
4465
4466static void
4467gc_grey(rb_objspace_t *objspace, VALUE obj)
4468{
4469#if RGENGC_CHECK_MODE
4470 if (RVALUE_MARKED(objspace, obj) == FALSE) rb_bug("gc_grey: %s is not marked.", rb_obj_info(obj));
4471 if (RVALUE_MARKING(objspace, obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", rb_obj_info(obj));
4472#endif
4473
4474 if (is_incremental_marking(objspace)) {
4475 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4476 }
4477
4479 rb_darray_append_without_gc(&objspace->weak_references, obj);
4480 }
4481
4482 push_mark_stack(&objspace->mark_stack, obj);
4483}
4484
4485static inline void
4486gc_mark_check_t_none(rb_objspace_t *objspace, VALUE obj)
4487{
4488 if (RB_UNLIKELY(BUILTIN_TYPE(obj) == T_NONE)) {
4489 enum {info_size = 256};
4490 char obj_info_buf[info_size];
4491 rb_raw_obj_info(obj_info_buf, info_size, obj);
4492
4493 char parent_obj_info_buf[info_size];
4494 rb_raw_obj_info(parent_obj_info_buf, info_size, objspace->rgengc.parent_object);
4495
4496 rb_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
4497 }
4498}
4499
4500static void
4501gc_mark(rb_objspace_t *objspace, VALUE obj)
4502{
4503 GC_ASSERT(during_gc);
4504 GC_ASSERT(!objspace->flags.during_reference_updating);
4505
4506 rgengc_check_relation(objspace, obj);
4507 if (!gc_mark_set(objspace, obj)) return; /* already marked */
4508
4509 if (0) { // for debugging GC marking misses
4510 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
4511 (void *)obj, obj_type_name(obj),
4512 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
4513 }
4514
4515 gc_mark_check_t_none(objspace, obj);
4516
4517 gc_aging(objspace, obj);
4518 gc_grey(objspace, obj);
4519}
4520
4521static inline void
4522gc_pin(rb_objspace_t *objspace, VALUE obj)
4523{
4524 GC_ASSERT(!SPECIAL_CONST_P(obj));
4525 if (RB_UNLIKELY(objspace->flags.during_compacting)) {
4526 if (RB_LIKELY(during_gc)) {
4527 if (!RVALUE_PINNED(objspace, obj)) {
4528 GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
4529 GET_HEAP_PAGE(obj)->pinned_slots++;
4530 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
4531 }
4532 }
4533 }
4534}
4535
4536static inline void
4537gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
4538{
4539 gc_pin(objspace, obj);
4540 gc_mark(objspace, obj);
4541}
4542
4543void
4544rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
4545{
4546 rb_objspace_t *objspace = objspace_ptr;
4547
4548 if (RB_UNLIKELY(objspace->flags.during_reference_updating)) {
4549 GC_ASSERT(objspace->flags.during_compacting);
4550 GC_ASSERT(during_gc);
4551
4552 VALUE destination = rb_gc_impl_location(objspace, *ptr);
4553 if (destination != *ptr) {
4554 *ptr = destination;
4555 }
4556 }
4557 else {
4558 gc_mark(objspace, *ptr);
4559 }
4560}
4561
4562void
4563rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
4564{
4565 rb_objspace_t *objspace = objspace_ptr;
4566
4567 gc_mark(objspace, obj);
4568}
4569
4570void
4571rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
4572{
4573 rb_objspace_t *objspace = objspace_ptr;
4574
4575 gc_mark_and_pin(objspace, obj);
4576}
4577
4578void
4579rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
4580{
4581 rb_objspace_t *objspace = objspace_ptr;
4582
4583 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
4584
4585 if (is_pointer_to_heap(objspace, (void *)obj)) {
4586 asan_unpoisoning_object(obj) {
4587 /* Garbage can live on the stack, so do not mark or pin */
4588 switch (BUILTIN_TYPE(obj)) {
4589 case T_ZOMBIE:
4590 case T_NONE:
4591 break;
4592 default:
4593 gc_mark_and_pin(objspace, obj);
4594 break;
4595 }
4596 }
4597 }
4598}
4599
4600static int
4601pin_value(st_data_t key, st_data_t value, st_data_t data)
4602{
4603 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
4604
4605 return ST_CONTINUE;
4606}
4607
4608static inline void
4609gc_mark_set_parent_raw(rb_objspace_t *objspace, VALUE obj, bool old_p)
4610{
4611 asan_unpoison_memory_region(&objspace->rgengc.parent_object, sizeof(objspace->rgengc.parent_object), false);
4612 asan_unpoison_memory_region(&objspace->rgengc.parent_object_old_p, sizeof(objspace->rgengc.parent_object_old_p), false);
4613 objspace->rgengc.parent_object = obj;
4614 objspace->rgengc.parent_object_old_p = old_p;
4615}
4616
4617static inline void
4618gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
4619{
4620 gc_mark_set_parent_raw(objspace, obj, RVALUE_OLD_P(objspace, obj));
4621}
4622
4623static inline void
4624gc_mark_set_parent_invalid(rb_objspace_t *objspace)
4625{
4626 asan_poison_memory_region(&objspace->rgengc.parent_object, sizeof(objspace->rgengc.parent_object));
4627 asan_poison_memory_region(&objspace->rgengc.parent_object_old_p, sizeof(objspace->rgengc.parent_object_old_p));
4628}
4629
4630static void
4631mark_roots(rb_objspace_t *objspace, const char **categoryp)
4632{
4633#define MARK_CHECKPOINT(category) do { \
4634 if (categoryp) *categoryp = category; \
4635} while (0)
4636
4637 MARK_CHECKPOINT("objspace");
4638 gc_mark_set_parent_raw(objspace, Qundef, false);
4639
4640 if (finalizer_table != NULL) {
4641 st_foreach(finalizer_table, pin_value, (st_data_t)objspace);
4642 }
4643
4644 if (stress_to_class) rb_gc_mark(stress_to_class);
4645
4646 rb_gc_save_machine_context();
4647 rb_gc_mark_roots(objspace, categoryp);
4648 gc_mark_set_parent_invalid(objspace);
4649}
4650
4651static void
4652gc_mark_children(rb_objspace_t *objspace, VALUE obj)
4653{
4654 gc_mark_set_parent(objspace, obj);
4655 rb_gc_mark_children(objspace, obj);
4656 gc_mark_set_parent_invalid(objspace);
4657}
4658
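/* Drains the mark stack built by gc_grey(). With incremental == TRUE it
 * processes objects until roughly `count` have been handled (popped
 * objects plus objects newly marked while tracing their children) and
 * clears each popped object's marking (grey) bit; with incremental ==
 * FALSE it drains everything and ignores the marking bits. Returns TRUE
 * once the stack is empty. */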
4663static inline int
4664gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
4665{
4666 mark_stack_t *mstack = &objspace->mark_stack;
4667 VALUE obj;
4668 size_t marked_slots_at_the_beginning = objspace->marked_slots;
4669 size_t popped_count = 0;
4670
4671 while (pop_mark_stack(mstack, &obj)) {
4672 if (obj == Qundef) continue; /* skip */
4673
4674 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(objspace, obj)) {
4675 rb_bug("gc_mark_stacked_objects: %s is not marked.", rb_obj_info(obj));
4676 }
4677 gc_mark_children(objspace, obj);
4678
4679 if (incremental) {
4680 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(objspace, obj)) {
4681 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
4682 }
4683 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4684 popped_count++;
4685
4686 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
4687 break;
4688 }
4689 }
4690 else {
4691 /* just ignore marking bits */
4692 }
4693 }
4694
4695 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
4696
4697 if (is_mark_stack_empty(mstack)) {
4698 shrink_stack_chunk_cache(mstack);
4699 return TRUE;
4700 }
4701 else {
4702 return FALSE;
4703 }
4704}
4705
4706static int
4707gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
4708{
4709 return gc_mark_stacked_objects(objspace, TRUE, count);
4710}
4711
4712static int
4713gc_mark_stacked_objects_all(rb_objspace_t *objspace)
4714{
4715 return gc_mark_stacked_objects(objspace, FALSE, 0);
4716}
4717
4718#if RGENGC_CHECK_MODE >= 4
4719
4720#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
4721#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
4722#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
4723
4724struct reflist {
4725 VALUE *list;
4726 int pos;
4727 int size;
4728};
4729
4730static struct reflist *
4731reflist_create(VALUE obj)
4732{
4733 struct reflist *refs = xmalloc(sizeof(struct reflist));
4734 refs->size = 1;
4735 refs->list = ALLOC_N(VALUE, refs->size);
4736 refs->list[0] = obj;
4737 refs->pos = 1;
4738 return refs;
4739}
4740
4741static void
4742reflist_destruct(struct reflist *refs)
4743{
4744 xfree(refs->list);
4745 xfree(refs);
4746}
4747
4748static void
4749reflist_add(struct reflist *refs, VALUE obj)
4750{
4751 if (refs->pos == refs->size) {
4752 refs->size *= 2;
4753 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
4754 }
4755
4756 refs->list[refs->pos++] = obj;
4757}
4758
4759static void
4760reflist_dump(struct reflist *refs)
4761{
4762 int i;
4763 for (i=0; i<refs->pos; i++) {
4764 VALUE obj = refs->list[i];
4765 if (IS_ROOTSIG(obj)) { /* root */
4766 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
4767 }
4768 else {
4769 fprintf(stderr, "<%s>", rb_obj_info(obj));
4770 }
4771 if (i+1 < refs->pos) fprintf(stderr, ", ");
4772 }
4773}
4774
4775static int
4776reflist_referred_from_machine_context(struct reflist *refs)
4777{
4778 int i;
4779 for (i=0; i<refs->pos; i++) {
4780 VALUE obj = refs->list[i];
4781 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
4782 }
4783 return 0;
4784}
4785
4786 struct allrefs {
4787 rb_objspace_t *objspace;
4788 /* a -> obj1
4789 * b -> obj1
4790 * c -> obj1
4791 * c -> obj2
4792 * d -> obj3
4793 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
4794 */
4795 struct st_table *references;
4796 const char *category;
4797 VALUE root_obj;
4798 mark_stack_t mark_stack;
4799 };
4800
4801static int
4802allrefs_add(struct allrefs *data, VALUE obj)
4803{
4804 struct reflist *refs;
4805 st_data_t r;
4806
4807 if (st_lookup(data->references, obj, &r)) {
4808 refs = (struct reflist *)r;
4809 reflist_add(refs, data->root_obj);
4810 return 0;
4811 }
4812 else {
4813 refs = reflist_create(data->root_obj);
4814 st_insert(data->references, obj, (st_data_t)refs);
4815 return 1;
4816 }
4817}
4818
4819static void
4820allrefs_i(VALUE obj, void *ptr)
4821{
4822 struct allrefs *data = (struct allrefs *)ptr;
4823
4824 if (allrefs_add(data, obj)) {
4825 push_mark_stack(&data->mark_stack, obj);
4826 }
4827}
4828
4829static void
4830allrefs_roots_i(VALUE obj, void *ptr)
4831{
4832 struct allrefs *data = (struct allrefs *)ptr;
4833 if (strlen(data->category) == 0) rb_bug("allrefs_roots_i: empty root category");
4834 data->root_obj = MAKE_ROOTSIG(data->category);
4835
4836 if (allrefs_add(data, obj)) {
4837 push_mark_stack(&data->mark_stack, obj);
4838 }
4839}
4840#define PUSH_MARK_FUNC_DATA(v) do { \
4841 struct gc_mark_func_data_struct *prev_mark_func_data = GET_VM()->gc.mark_func_data; \
4842 GET_VM()->gc.mark_func_data = (v);
4843
4844#define POP_MARK_FUNC_DATA() GET_VM()->gc.mark_func_data = prev_mark_func_data;} while (0)
4845
4846static st_table *
4847objspace_allrefs(rb_objspace_t *objspace)
4848{
4849 struct allrefs data;
4850 struct gc_mark_func_data_struct mfd;
4851 VALUE obj;
4852 int prev_dont_gc = dont_gc_val();
4853 dont_gc_on();
4854
4855 data.objspace = objspace;
4856 data.references = st_init_numtable();
4857 init_mark_stack(&data.mark_stack);
4858
4859 mfd.mark_func = allrefs_roots_i;
4860 mfd.data = &data;
4861
4862 /* traverse root objects */
4863 PUSH_MARK_FUNC_DATA(&mfd);
4864 GET_VM()->gc.mark_func_data = &mfd;
4865 mark_roots(objspace, &data.category);
4866 POP_MARK_FUNC_DATA();
4867
4868 /* traverse the remaining objects reachable from the roots */
4869 while (pop_mark_stack(&data.mark_stack, &obj)) {
4870 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
4871 }
4872 free_stack_chunks(&data.mark_stack);
4873
4874 dont_gc_set(prev_dont_gc);
4875 return data.references;
4876}
4877
4878static int
4879objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
4880{
4881 struct reflist *refs = (struct reflist *)value;
4882 reflist_destruct(refs);
4883 return ST_CONTINUE;
4884}
4885
4886static void
4887objspace_allrefs_destruct(struct st_table *refs)
4888{
4889 st_foreach(refs, objspace_allrefs_destruct_i, 0);
4890 st_free_table(refs);
4891}
4892
4893#if RGENGC_CHECK_MODE >= 5
4894static int
4895allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
4896{
4897 VALUE obj = (VALUE)k;
4898 struct reflist *refs = (struct reflist *)v;
4899 fprintf(stderr, "[allrefs_dump_i] %s <- ", rb_obj_info(obj));
4900 reflist_dump(refs);
4901 fprintf(stderr, "\n");
4902 return ST_CONTINUE;
4903}
4904
4905static void
4906allrefs_dump(rb_objspace_t *objspace)
4907{
4908 VALUE size = objspace->rgengc.allrefs_table->num_entries;
4909 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
4910 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
4911}
4912#endif
4913
4914static int
4915gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
4916{
4917 VALUE obj = k;
4918 struct reflist *refs = (struct reflist *)v;
4919 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
4920
4921 /* object should be marked or oldgen */
4922 if (!RVALUE_MARKED(objspace, obj)) {
4923 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", rb_obj_info(obj));
4924 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
4925 reflist_dump(refs);
4926
4927 if (reflist_referred_from_machine_context(refs)) {
4928 fprintf(stderr, " (marked from machine stack).\n");
4929 /* marked from machine context can be false positive */
4930 }
4931 else {
4932 objspace->rgengc.error_count++;
4933 fprintf(stderr, "\n");
4934 }
4935 }
4936 return ST_CONTINUE;
4937}
4938
4939static void
4940gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
4941{
4942 size_t saved_malloc_increase = objspace->malloc_params.increase;
4943#if RGENGC_ESTIMATE_OLDMALLOC
4944 size_t saved_oldmalloc_increase = objspace->malloc_counters.oldmalloc_increase;
4945#endif
4946 VALUE already_disabled = rb_objspace_gc_disable(objspace);
4947
4948 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
4949
4950 if (checker_func) {
4951 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
4952 }
4953
4954 if (objspace->rgengc.error_count > 0) {
4955#if RGENGC_CHECK_MODE >= 5
4956 allrefs_dump(objspace);
4957#endif
4958 if (checker_name) rb_bug("%s: GC has a problem.", checker_name);
4959 }
4960
4961 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
4962 objspace->rgengc.allrefs_table = 0;
4963
4964 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4965 objspace->malloc_params.increase = saved_malloc_increase;
4966#if RGENGC_ESTIMATE_OLDMALLOC
4967 objspace->malloc_counters.oldmalloc_increase = saved_oldmalloc_increase;
4968#endif
4969}
4970#endif /* RGENGC_CHECK_MODE >= 4 */
4971
4972 struct verify_internal_consistency_struct {
4973 rb_objspace_t *objspace;
4974 int err_count;
4975 size_t live_object_count;
4976 size_t zombie_object_count;
4977
4978 VALUE parent;
4979 size_t old_object_count;
4980 size_t remembered_shady_count;
4981};
4982
4983static void
4984check_generation_i(const VALUE child, void *ptr)
4985{
4987 const VALUE parent = data->parent;
4988
4989 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(data->objspace, parent));
4990
4991 if (!RVALUE_OLD_P(data->objspace, child)) {
4992 if (!RVALUE_REMEMBERED(data->objspace, parent) &&
4993 !RVALUE_REMEMBERED(data->objspace, child) &&
4994 !RVALUE_UNCOLLECTIBLE(data->objspace, child)) {
4995 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", rb_obj_info(parent), rb_obj_info(child));
4996 data->err_count++;
4997 }
4998 }
4999}
5000
5001static void
5002check_color_i(const VALUE child, void *ptr)
5003{
5005 const VALUE parent = data->parent;
5006
5007 if (!RVALUE_WB_UNPROTECTED(data->objspace, parent) && RVALUE_WHITE_P(data->objspace, child)) {
5008 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5009 rb_obj_info(parent), rb_obj_info(child));
5010 data->err_count++;
5011 }
5012}
5013
5014static void
5015check_children_i(const VALUE child, void *ptr)
5016{
5018 if (check_rvalue_consistency_force(data->objspace, child, FALSE) != 0) {
5019 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
5020 rb_obj_info(child), rb_obj_info(data->parent));
5021
5022 data->err_count++;
5023 }
5024}
5025
5026static int
5027verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
5029{
5030 VALUE obj;
5031 rb_objspace_t *objspace = data->objspace;
5032
5033 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5034 asan_unpoisoning_object(obj) {
5035 if (!rb_gc_impl_garbage_object_p(objspace, obj)) {
5036 /* count objects */
5037 data->live_object_count++;
5038 data->parent = obj;
5039
5040 /* Normally, we don't expect T_MOVED objects to be in the heap.
5041 * But they can stay alive on the stack. */
5042 if (!gc_object_moved_p(objspace, obj)) {
5043 /* moved slots don't have children */
5044 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5045 }
5046
5047 /* count old and remembered shady (WB-unprotected) objects */
5048 if (RVALUE_OLD_P(objspace, obj)) data->old_object_count++;
5049 if (RVALUE_WB_UNPROTECTED(objspace, obj) && RVALUE_UNCOLLECTIBLE(objspace, obj)) data->remembered_shady_count++;
5050
5051 if (!is_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
5052 /* reachable objects from an oldgen object should be old or (young with remember) */
5053 data->parent = obj;
5054 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5055 }
5056
5057 if (!is_marking(objspace) && rb_gc_obj_shareable_p(obj)) {
5058 rb_gc_verify_shareable(obj);
5059 }
5060
5061 if (is_incremental_marking(objspace)) {
5062 if (RVALUE_BLACK_P(objspace, obj)) {
5063 /* reachable objects from black objects should be black or grey objects */
5064 data->parent = obj;
5065 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
5066 }
5067 }
5068 }
5069 else {
5070 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
5071 data->zombie_object_count++;
5072
5073 if ((RBASIC(obj)->flags & ~ZOMBIE_OBJ_KEPT_FLAGS) != T_ZOMBIE) {
5074 fprintf(stderr, "verify_internal_consistency_i: T_ZOMBIE has extra flags set: %s\n",
5075 rb_obj_info(obj));
5076 data->err_count++;
5077 }
5078
5079 if (!!FL_TEST(obj, FL_FINALIZE) != !!st_is_member(finalizer_table, obj)) {
5080 fprintf(stderr, "verify_internal_consistency_i: FL_FINALIZE %s but %s finalizer_table: %s\n",
5081 FL_TEST(obj, FL_FINALIZE) ? "set" : "not set", st_is_member(finalizer_table, obj) ? "in" : "not in",
5082 rb_obj_info(obj));
5083 data->err_count++;
5084 }
5085 }
5086 }
5087 }
5088 }
5089
5090 return 0;
5091}
5092
5093static int
5094gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
5095{
5096 unsigned int has_remembered_shady = FALSE;
5097 unsigned int has_remembered_old = FALSE;
5098 int remembered_old_objects = 0;
5099 int free_objects = 0;
5100 int zombie_objects = 0;
5101
5102 short slot_size = page->slot_size;
5103 uintptr_t start = (uintptr_t)page->start;
5104 uintptr_t end = start + page->total_slots * slot_size;
5105
5106 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5107 VALUE val = (VALUE)ptr;
5108 asan_unpoisoning_object(val) {
5109 enum ruby_value_type type = BUILTIN_TYPE(val);
5110
5111 if (type == T_NONE) free_objects++;
5112 if (type == T_ZOMBIE) zombie_objects++;
5113 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
5114 has_remembered_shady = TRUE;
5115 }
5116 if (RVALUE_PAGE_MARKING(page, val)) {
5117 has_remembered_old = TRUE;
5118 remembered_old_objects++;
5119 }
5120 }
5121 }
5122
5123 if (!is_incremental_marking(objspace) &&
5124 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
5125
5126 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5127 VALUE val = (VALUE)ptr;
5128 if (RVALUE_PAGE_MARKING(page, val)) {
5129 fprintf(stderr, "marking -> %s\n", rb_obj_info(val));
5130 }
5131 }
5132 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5133 (void *)page, remembered_old_objects, obj ? rb_obj_info(obj) : "");
5134 }
5135
5136 if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
5137 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5138 (void *)page, obj ? rb_obj_info(obj) : "");
5139 }
5140
5141 if (0) {
5142 /* free_slots may not be equal to free_objects */
5143 if (page->free_slots != free_objects) {
5144 rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
5145 }
5146 }
5147 if (page->final_slots != zombie_objects) {
5148 rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
5149 }
5150
5151 return remembered_old_objects;
5152}
5153
5154static int
5155gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
5156{
5157 int remembered_old_objects = 0;
5158 struct heap_page *page = 0;
5159
5160 ccan_list_for_each(head, page, page_node) {
5161 asan_unlock_freelist(page);
5162 struct free_slot *p = page->freelist;
5163 while (p) {
5164 VALUE vp = (VALUE)p;
5165 VALUE prev = vp;
5166 rb_asan_unpoison_object(vp, false);
5167 if (BUILTIN_TYPE(vp) != T_NONE) {
5168 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", rb_obj_info(vp));
5169 }
5170 p = p->next;
5171 rb_asan_poison_object(prev);
5172 }
5173 asan_lock_freelist(page);
5174
5175 if (page->flags.has_remembered_objects == FALSE) {
5176 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
5177 }
5178 }
5179
5180 return remembered_old_objects;
5181}
5182
5183static int
5184gc_verify_heap_pages(rb_objspace_t *objspace)
5185{
5186 int remembered_old_objects = 0;
5187 for (int i = 0; i < HEAP_COUNT; i++) {
5188 remembered_old_objects += gc_verify_heap_pages_(objspace, &((&heaps[i])->pages));
5189 }
5190 return remembered_old_objects;
5191}
5192
5193static void
5194gc_verify_internal_consistency_(rb_objspace_t *objspace)
5195{
5196 struct verify_internal_consistency_struct data = {0};
5197
5198 data.objspace = objspace;
5199 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
5200
5201 /* check relations */
5202 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
5203 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
5204 short slot_size = page->slot_size;
5205
5206 uintptr_t start = (uintptr_t)page->start;
5207 uintptr_t end = start + page->total_slots * slot_size;
5208
5209 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
5210 }
5211
5212 if (data.err_count != 0) {
5213#if RGENGC_CHECK_MODE >= 5
5214 objspace->rgengc.error_count = data.err_count;
5215 gc_marks_check(objspace, NULL, NULL);
5216 allrefs_dump(objspace);
5217#endif
5218 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
5219 }
5220
5221 /* check heap_page status */
5222 gc_verify_heap_pages(objspace);
5223
5224 /* check counters */
5225
5226 ractor_cache_flush_count(objspace, rb_gc_get_ractor_newobj_cache());
5227
5228 if (!is_lazy_sweeping(objspace) &&
5229 !finalizing &&
5230 !rb_gc_multi_ractor_p()) {
5231 if (objspace_live_slots(objspace) != data.live_object_count) {
5232 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
5233 total_final_slots_count(objspace), total_freed_objects(objspace));
5234 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5235 objspace_live_slots(objspace), data.live_object_count);
5236 }
5237 }
5238
5239 if (!is_marking(objspace)) {
5240 if (objspace->rgengc.old_objects != data.old_object_count) {
5241 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5242 objspace->rgengc.old_objects, data.old_object_count);
5243 }
5244 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
5245 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
5246 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
5247 }
5248 }
5249
5250 if (!finalizing) {
5251 size_t list_count = 0;
5252
5253 {
5254 VALUE z = heap_pages_deferred_final;
5255 while (z) {
5256 list_count++;
5257 z = RZOMBIE(z)->next;
5258 }
5259 }
5260
5261 if (total_final_slots_count(objspace) != data.zombie_object_count ||
5262 total_final_slots_count(objspace) != list_count) {
5263
5264 rb_bug("inconsistent finalizing object count:\n"
5265 " expect %"PRIuSIZE"\n"
5266 " but %"PRIuSIZE" zombies\n"
5267 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5268 total_final_slots_count(objspace),
5269 data.zombie_object_count,
5270 list_count);
5271 }
5272 }
5273
5274 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
5275}
5276
5277static void
5278gc_verify_internal_consistency(void *objspace_ptr)
5279{
5280 rb_objspace_t *objspace = objspace_ptr;
5281
5282 unsigned int lev = RB_GC_VM_LOCK();
5283 {
5284 rb_gc_vm_barrier(); // stop other ractors
5285
5286 unsigned int prev_during_gc = during_gc;
5287 during_gc = FALSE; // stop gc here
5288 {
5289 gc_verify_internal_consistency_(objspace);
5290 }
5291 during_gc = prev_during_gc;
5292 }
5293 RB_GC_VM_UNLOCK(lev);
5294}
5295
5296static void
5297heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
5298{
5299 if (heap->pooled_pages) {
5300 if (heap->free_pages) {
5301 struct heap_page *free_pages_tail = heap->free_pages;
5302 while (free_pages_tail->free_next) {
5303 free_pages_tail = free_pages_tail->free_next;
5304 }
5305 free_pages_tail->free_next = heap->pooled_pages;
5306 }
5307 else {
5308 heap->free_pages = heap->pooled_pages;
5309 }
5310
5311 heap->pooled_pages = NULL;
5312 }
5313}
5314
5315static int
5316gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
5317{
5318 struct heap_page *page = GET_HEAP_PAGE(obj);
5319 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5320
5321 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5322 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
5323 MARK_IN_BITMAP(uncollectible_bits, obj);
5324 objspace->rgengc.uncollectible_wb_unprotected_objects++;
5325
5326#if RGENGC_PROFILE > 0
5327 objspace->profile.total_remembered_shady_object_count++;
5328#if RGENGC_PROFILE >= 2
5329 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5330#endif
5331#endif
5332 return TRUE;
5333 }
5334 else {
5335 return FALSE;
5336 }
5337}
5338
5339static inline void
5340gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits, short slot_size)
5341{
5342 if (bits) {
5343 do {
5344 if (bits & 1) {
5345 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", rb_obj_info((VALUE)p));
5346 GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, (VALUE)p));
5347 GC_ASSERT(RVALUE_MARKED(objspace, (VALUE)p));
5348 gc_mark_children(objspace, (VALUE)p);
5349 }
5350 p += slot_size;
5351 bits >>= 1;
5352 } while (bits);
5353 }
5354}
5355
5356static void
5357gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
5358{
5359 struct heap_page *page = 0;
5360
5361 ccan_list_for_each(&heap->pages, page, page_node) {
5362 bits_t *mark_bits = page->mark_bits;
5363 bits_t *wbun_bits = page->wb_unprotected_bits;
5364 uintptr_t p = page->start;
5365 short slot_size = page->slot_size;
5366 int total_slots = page->total_slots;
5367 int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
5368 size_t j;
5369
5370 for (j=0; j<(size_t)bitmap_plane_count; j++) {
5371 bits_t bits = mark_bits[j] & wbun_bits[j];
5372 gc_marks_wb_unprotected_objects_plane(objspace, p, bits, slot_size);
5373 p += BITS_BITLENGTH * slot_size;
5374 }
5375 }
5376
5377 gc_mark_stacked_objects_all(objspace);
5378}
5379
5380void
5381rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
5382{
5384}
5385
5386bool
5387rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
5388{
5389 rb_objspace_t *objspace = objspace_ptr;
5390
5391 bool marked = RVALUE_MARKED(objspace, obj);
5392
5393 if (marked) {
5394 rgengc_check_relation(objspace, obj);
5395 }
5396
5397 return marked;
5398}
5399
5400static void
5401gc_update_weak_references(rb_objspace_t *objspace)
5402{
5403 VALUE *obj_ptr;
5404 rb_darray_foreach(objspace->weak_references, i, obj_ptr) {
5405 gc_mark_set_parent(objspace, *obj_ptr);
5406 rb_gc_handle_weak_references(*obj_ptr);
5407 gc_mark_set_parent_invalid(objspace);
5408 }
5409
5410 size_t capa = rb_darray_capa(objspace->weak_references);
5411 size_t size = rb_darray_size(objspace->weak_references);
5412
5413 objspace->profile.weak_references_count = size;
5414
5415 rb_darray_clear(objspace->weak_references);
5416
5417 /* If the darray has capacity for more than four times the number of
5418 * elements in use, shrink it down to twice the used size. */
5419 if (capa > size * 4) {
5420 rb_darray_resize_capa_without_gc(&objspace->weak_references, size * 2);
5421 }
5422}
5423
5424static void
5425gc_marks_finish(rb_objspace_t *objspace)
5426{
5427 /* finish incremental GC */
5428 if (is_incremental_marking(objspace)) {
5429 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
5430 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
5431 mark_stack_size(&objspace->mark_stack));
5432 }
5433
5434 mark_roots(objspace, NULL);
5435 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
5436
5437#if RGENGC_CHECK_MODE >= 2
5438 if (gc_verify_heap_pages(objspace) != 0) {
5439 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
5440 }
5441#endif
5442
5443 objspace->flags.during_incremental_marking = FALSE;
5444 /* check children of all marked wb-unprotected objects */
5445 for (int i = 0; i < HEAP_COUNT; i++) {
5446 gc_marks_wb_unprotected_objects(objspace, &heaps[i]);
5447 }
5448 }
5449
5450 gc_update_weak_references(objspace);
5451
5452#if RGENGC_CHECK_MODE >= 2
5453 gc_verify_internal_consistency(objspace);
5454#endif
5455
5456#if RGENGC_CHECK_MODE >= 4
5457 during_gc = FALSE;
5458 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
5459 during_gc = TRUE;
5460#endif
5461
5462 {
5463 const unsigned long r_mul = objspace->live_ractor_cache_count > 8 ? 8 : objspace->live_ractor_cache_count; // up to 8
5464
5465 size_t total_slots = objspace_available_slots(objspace);
5466 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
5467 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
5468 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5469 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
5470 min_free_slots = gc_params.heap_free_slots * r_mul;
5471 }
5472
5473 int full_marking = is_full_marking(objspace);
5474
5475 GC_ASSERT(objspace_available_slots(objspace) >= objspace->marked_slots);
5476
5477 /* Set up freeable slots. */
5478 size_t total_init_slots = 0;
5479 for (int i = 0; i < HEAP_COUNT; i++) {
5480 total_init_slots += (gc_params.heap_init_bytes / heaps[i].slot_size) * r_mul;
5481 }
5482
5483 if (max_free_slots < total_init_slots) {
5484 max_free_slots = total_init_slots;
5485 }
5486
5487 /* Approximate freeable pages using the average slots-per-page across all heaps. */
5488 if (sweep_slots > max_free_slots) {
5489 size_t excess_slots = sweep_slots - max_free_slots;
5490 size_t total_heap_pages = heap_eden_total_pages(objspace);
5491 heap_pages_freeable_pages = total_heap_pages > 0
5492 ? excess_slots * total_heap_pages / total_slots
5493 : 0;
5494 }
5495 else {
5496 heap_pages_freeable_pages = 0;
5497 }
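/* Illustrative arithmetic for the block above (assumed values):
 * total_slots == 100000 spread over total_heap_pages == 200,
 * max_free_slots == 40000 and sweep_slots == 55000 give
 * excess_slots == 15000, so roughly 15000 * 200 / 100000 == 30 pages
 * are marked freeable. */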
5498
5499 if (objspace->heap_pages.allocatable_bytes == 0 && sweep_slots < min_free_slots) {
5500 if (!full_marking && sweep_slots < min_free_slots * 7 / 8) {
5501 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5502 full_marking = TRUE;
5503 }
5504 else {
5505 gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
5506 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
5507 }
5508 }
5509
5510 if (full_marking) {
5511 heap_allocatable_bytes_expand(objspace, NULL, sweep_slots, total_slots, heaps[0].slot_size);
5512 }
5513 }
5514
5515 if (full_marking) {
5516 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
5517 const double r = gc_params.oldobject_limit_factor;
5518 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
5519 (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
5520 (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
5521 );
5522 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
5523 }
5524
5525 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
5526 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
5527 }
5528 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
5529 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
5530 }
5531
5532 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
5533 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
5534 "sweep %"PRIdSIZE" slots, allocatable %"PRIdSIZE" bytes, next GC: %s)\n",
5535 objspace->marked_slots, objspace->rgengc.old_objects, objspace_available_slots(objspace), sweep_slots, objspace->heap_pages.allocatable_bytes,
5536 gc_needs_major_flags ? "major" : "minor");
5537 }
5538
5539 // TODO: refactor so we don't need to call this
5540 rb_ractor_finish_marking();
5541
5542 rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_END_MARK);
5543}
5544
5545static bool
5546gc_compact_heap_cursors_met_p(rb_heap_t *heap)
5547{
5548 return heap->sweeping_page == heap->compact_cursor;
5549}
5550
5551
5552static rb_heap_t *
5553gc_compact_destination_pool(rb_objspace_t *objspace, rb_heap_t *src_pool, VALUE obj)
5554{
5555 size_t obj_size = rb_gc_obj_optimal_size(obj);
5556 if (obj_size == 0) {
5557 return src_pool;
5558 }
5559
5560 GC_ASSERT(rb_gc_impl_size_allocatable_p(obj_size));
5561
5562 size_t idx = heap_idx_for_size(obj_size);
5563
5564 return &heaps[idx];
5565}
5566
5567static bool
5568gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, VALUE src)
5569{
5570 GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
5571 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5572
5573 rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, heap, src);
5574 if (gc_compact_heap_cursors_met_p(dest_pool)) {
5575 return dest_pool != heap;
5576 }
5577
5578 while (!try_move(objspace, dest_pool, dest_pool->free_pages, src)) {
5579 struct gc_sweep_context ctx = {
5580 .page = dest_pool->sweeping_page,
5581 .final_slots = 0,
5582 .freed_slots = 0,
5583 .empty_slots = 0,
5584 };
5585
5586 /* The page of src could be partially compacted, so it may contain
5587 * T_MOVED. Sweeping a page may read objects on this page, so we
5588 * need to lock the page. */
5589 lock_page_body(objspace, GET_PAGE_BODY(src));
5590 gc_sweep_page(objspace, dest_pool, &ctx);
5591 unlock_page_body(objspace, GET_PAGE_BODY(src));
5592
5593 if (dest_pool->sweeping_page->free_slots > 0) {
5594 heap_add_freepage(dest_pool, dest_pool->sweeping_page);
5595 }
5596
5597 dest_pool->sweeping_page = ccan_list_next(&dest_pool->pages, dest_pool->sweeping_page, page_node);
5598 if (gc_compact_heap_cursors_met_p(dest_pool)) {
5599 return dest_pool != heap;
5600 }
5601 }
5602
5603 return true;
5604}
5605
5606static bool
5607gc_compact_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
5608{
5609 short slot_size = page->slot_size;
5610
5611 do {
5612 VALUE vp = (VALUE)p;
5613 GC_ASSERT(vp % sizeof(VALUE) == 0);
5614
5615 if (bitset & 1) {
5616 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
5617
5618 if (gc_is_moveable_obj(objspace, vp)) {
5619 if (!gc_compact_move(objspace, heap, vp)) {
5620 // The cursors met; bubble the failure up.
5621 return false;
5622 }
5623 }
5624 }
5625 p += slot_size;
5626 bitset >>= 1;
5627 } while (bitset);
5628
5629 return true;
5630}
5631
5632 // Iterate over all the objects in the page, moving them to where they want to go.
5633static bool
5634gc_compact_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
5635{
5636 GC_ASSERT(page == heap->compact_cursor);
5637
5638 bits_t *mark_bits, *pin_bits;
5639 bits_t bitset;
5640 uintptr_t p = page->start;
5641 short slot_size = page->slot_size;
5642 int total_slots = page->total_slots;
5643 int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
5644
5645 mark_bits = page->mark_bits;
5646 pin_bits = page->pinned_bits;
5647
5648 for (int j = 0; j < bitmap_plane_count; j++) {
5649 // objects that can be moved are marked and not pinned
5650 bitset = (mark_bits[j] & ~pin_bits[j]);
5651 if (bitset) {
5652 if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5653 return false;
5654 }
5655 p += BITS_BITLENGTH * slot_size;
5656 }
5657
5658 return true;
5659}
5660
5661static bool
5662gc_compact_all_compacted_p(rb_objspace_t *objspace)
5663{
5664 for (int i = 0; i < HEAP_COUNT; i++) {
5665 rb_heap_t *heap = &heaps[i];
5666
5667 if (heap->total_pages > 0 &&
5668 !gc_compact_heap_cursors_met_p(heap)) {
5669 return false;
5670 }
5671 }
5672
5673 return true;
5674}
5675
5676static void
5677gc_sweep_compact(rb_objspace_t *objspace)
5678{
5679 gc_compact_start(objspace);
5680#if RGENGC_CHECK_MODE >= 2
5681 gc_verify_internal_consistency(objspace);
5682#endif
5683
5684 while (!gc_compact_all_compacted_p(objspace)) {
5685 for (int i = 0; i < HEAP_COUNT; i++) {
5686 rb_heap_t *heap = &heaps[i];
5687
5688 if (gc_compact_heap_cursors_met_p(heap)) {
5689 continue;
5690 }
5691
5692 struct heap_page *start_page = heap->compact_cursor;
5693
5694 if (!gc_compact_page(objspace, heap, start_page)) {
5695 lock_page_body(objspace, start_page->body);
5696
5697 continue;
5698 }
5699
5700 // If we get here, we've finished moving all objects on the compact_cursor page
5701 // So we can lock it and move the cursor on to the next one.
5702 lock_page_body(objspace, start_page->body);
5703 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
5704 }
5705 }
5706
5707 gc_compact_finish(objspace);
5708
5709#if RGENGC_CHECK_MODE >= 2
5710 gc_verify_internal_consistency(objspace);
5711#endif
5712}
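/* A note on lock_page_body (hedged; the helper is defined earlier in this
 * file): compaction relies on a page-granularity read barrier. On POSIX
 * builds this is typically
 *
 *     mprotect(page_body, HEAP_PAGE_SIZE, PROT_NONE);
 *
 * so that a stray access to a locked page faults; the fault handler unlocks
 * the page, invalidates the moved objects on it, and bumps the
 * read_barrier_faults counter reported by GC.stat. */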
5713
5714static void
5715gc_marks_rest(rb_objspace_t *objspace)
5716{
5717 gc_report(1, objspace, "gc_marks_rest\n");
5718
5719 for (int i = 0; i < HEAP_COUNT; i++) {
5720 (&heaps[i])->pooled_pages = NULL;
5721 }
5722
5723 if (is_incremental_marking(objspace)) {
5724 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
5725 }
5726 else {
5727 gc_mark_stacked_objects_all(objspace);
5728 }
5729
5730 gc_marks_finish(objspace);
5731}
5732
5733static bool
5734gc_marks_step(rb_objspace_t *objspace, size_t slots)
5735{
5736 bool marking_finished = false;
5737
5738 GC_ASSERT(is_marking(objspace));
5739 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
5740 gc_marks_finish(objspace);
5741
5742 marking_finished = true;
5743 }
5744
5745 return marking_finished;
5746}
5747
5748static bool
5749gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
5750{
5751 GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
5752 bool marking_finished = true;
5753
5754 gc_marking_enter(objspace);
5755
5756 if (heap->free_pages) {
5757        gc_report(2, objspace, "gc_marks_continue: has pooled pages\n");
5758
5759 marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
5760 }
5761 else {
5762 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
5763 mark_stack_size(&objspace->mark_stack));
5764 heap->force_incremental_marking_finish_count++;
5765 gc_marks_rest(objspace);
5766 }
5767
5768 gc_marking_exit(objspace);
5769
5770 return marking_finished;
5771}
5772
5773static void
5774gc_marks_start(rb_objspace_t *objspace, int full_mark)
5775{
5776 /* start marking */
5777 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
5778 gc_mode_transition(objspace, gc_mode_marking);
5779
5780 if (full_mark) {
5781 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
5782 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
5783
5784 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
5785 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
5786 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
5787 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
5788 objspace->flags.during_minor_gc = FALSE;
5789 if (ruby_enable_autocompact) {
5790 objspace->flags.during_compacting |= TRUE;
5791 }
5792 objspace->profile.major_gc_count++;
5793 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
5794 objspace->rgengc.old_objects = 0;
5795 objspace->rgengc.last_major_gc = objspace->profile.count;
5796 objspace->marked_slots = 0;
5797
5798 for (int i = 0; i < HEAP_COUNT; i++) {
5799 rb_heap_t *heap = &heaps[i];
5800 rgengc_mark_and_rememberset_clear(objspace, heap);
5801 heap_move_pooled_pages_to_free_pages(heap);
5802
5803 if (objspace->flags.during_compacting) {
5804 struct heap_page *page = NULL;
5805
5806 ccan_list_for_each(&heap->pages, page, page_node) {
5807 page->pinned_slots = 0;
5808 }
5809 }
5810 }
5811 }
5812 else {
5813 objspace->flags.during_minor_gc = TRUE;
5814 objspace->marked_slots =
5815 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
5816 objspace->profile.minor_gc_count++;
5817
5818 for (int i = 0; i < HEAP_COUNT; i++) {
5819 rgengc_rememberset_mark(objspace, &heaps[i]);
5820 }
5821 }
5822
5823 mark_roots(objspace, NULL);
5824
5825 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
5826 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
5827}
5828
5829static bool
5830gc_marks(rb_objspace_t *objspace, int full_mark)
5831{
5832 gc_prof_mark_timer_start(objspace);
5833 gc_marking_enter(objspace);
5834
5835 bool marking_finished = false;
5836
5837 /* setup marking */
5838
5839 gc_marks_start(objspace, full_mark);
5840 if (!is_incremental_marking(objspace)) {
5841 gc_marks_rest(objspace);
5842 marking_finished = true;
5843 }
5844
5845#if RGENGC_PROFILE > 0
5846 if (gc_prof_record(objspace)) {
5847 gc_profile_record *record = gc_prof_record(objspace);
5848 record->old_objects = objspace->rgengc.old_objects;
5849 }
5850#endif
5851
5852 gc_marking_exit(objspace);
5853 gc_prof_mark_timer_stop(objspace);
5854
5855 return marking_finished;
5856}
5857
5858/* RGENGC */
5859
5860static void
5861gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
5862{
5863 if (level <= RGENGC_DEBUG) {
5864 char buf[1024];
5865 FILE *out = stderr;
5866 va_list args;
5867 const char *status = " ";
5868
5869 if (during_gc) {
5870 status = is_full_marking(objspace) ? "+" : "-";
5871 }
5872 else {
5873 if (is_lazy_sweeping(objspace)) {
5874 status = "S";
5875 }
5876 if (is_incremental_marking(objspace)) {
5877 status = "M";
5878 }
5879 }
5880
5881 va_start(args, fmt);
5882 vsnprintf(buf, 1024, fmt, args);
5883 va_end(args);
5884
5885 fprintf(out, "%s|", status);
5886 fputs(buf, out);
5887 }
5888}
5889
5890/* bit operations */
5891
5892static int
5893rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
5894{
5895 struct heap_page *page = GET_HEAP_PAGE(obj);
5896 bits_t *bits = &page->remembered_bits[0];
5897
5898 if (MARKED_IN_BITMAP(bits, obj)) {
5899 return FALSE;
5900 }
5901 else {
5902 page->flags.has_remembered_objects = TRUE;
5903 MARK_IN_BITMAP(bits, obj);
5904 return TRUE;
5905 }
5906}
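/* Bitmap sketch (illustrative): every heap page keeps one bit per slot, so
 * the check-then-set above is two O(1) bit operations, roughly
 *
 *     word = bits[BITMAP_INDEX(obj)];
 *     if (word & BITMAP_BIT(obj)) return FALSE;        // already remembered
 *     bits[BITMAP_INDEX(obj)] = word | BITMAP_BIT(obj);
 *
 * has_remembered_objects lets rgengc_rememberset_mark skip whole pages whose
 * remembered bitmaps are empty. */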
5907
5908/* wb, etc */
5909
5910/* return FALSE if already remembered */
5911static int
5912rgengc_remember(rb_objspace_t *objspace, VALUE obj)
5913{
5914 gc_report(6, objspace, "rgengc_remember: %s %s\n", rb_obj_info(obj),
5915 RVALUE_REMEMBERED(objspace, obj) ? "was already remembered" : "is remembered now");
5916
5917 check_rvalue_consistency(objspace, obj);
5918
5919 if (RGENGC_CHECK_MODE) {
5920 if (RVALUE_WB_UNPROTECTED(objspace, obj)) rb_bug("rgengc_remember: %s is not wb protected.", rb_obj_info(obj));
5921 }
5922
5923#if RGENGC_PROFILE > 0
5924 if (!RVALUE_REMEMBERED(objspace, obj)) {
5925 if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0) {
5926 objspace->profile.total_remembered_normal_object_count++;
5927#if RGENGC_PROFILE >= 2
5928 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
5929#endif
5930 }
5931 }
5932#endif /* RGENGC_PROFILE > 0 */
5933
5934 return rgengc_remembersetbits_set(objspace, obj);
5935}
5936
5937#ifndef PROFILE_REMEMBERSET_MARK
5938#define PROFILE_REMEMBERSET_MARK 0
5939#endif
5940
5941static inline void
5942rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset, short slot_size)
5943{
5944 if (bitset) {
5945 do {
5946 if (bitset & 1) {
5947 VALUE obj = (VALUE)p;
5948 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", rb_obj_info(obj));
5949 GC_ASSERT(RVALUE_UNCOLLECTIBLE(objspace, obj));
5950 GC_ASSERT(RVALUE_OLD_P(objspace, obj) || RVALUE_WB_UNPROTECTED(objspace, obj));
5951
5952 gc_mark_children(objspace, obj);
5953
5955 rb_darray_append_without_gc(&objspace->weak_references, obj);
5956 }
5957 }
5958 p += slot_size;
5959 bitset >>= 1;
5960 } while (bitset);
5961 }
5962}
5963
5964static void
5965rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
5966{
5967 size_t j;
5968 struct heap_page *page = 0;
5969#if PROFILE_REMEMBERSET_MARK
5970 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5971#endif
5972 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
5973
5974 ccan_list_for_each(&heap->pages, page, page_node) {
5975 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
5976 uintptr_t p = page->start;
5977 short slot_size = page->slot_size;
5978 int total_slots = page->total_slots;
5979 int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
5980 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
5981 bits_t *remembered_bits = page->remembered_bits;
5982 bits_t *uncollectible_bits = page->uncollectible_bits;
5983 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
5984#if PROFILE_REMEMBERSET_MARK
5985 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
5986 else if (page->flags.has_remembered_objects) has_old++;
5987 else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
5988#endif
5989 for (j=0; j < (size_t)bitmap_plane_count; j++) {
5990 bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
5991 remembered_bits[j] = 0;
5992 }
5993 page->flags.has_remembered_objects = FALSE;
5994
5995 for (j=0; j < (size_t)bitmap_plane_count; j++) {
5996 bitset = bits[j];
5997 rgengc_rememberset_mark_plane(objspace, p, bitset, slot_size);
5998 p += BITS_BITLENGTH * slot_size;
5999 }
6000 }
6001#if PROFILE_REMEMBERSET_MARK
6002 else {
6003 skip++;
6004 }
6005#endif
6006 }
6007
6008#if PROFILE_REMEMBERSET_MARK
6009 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6010#endif
6011 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6012}
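/* The per-plane root set computed above, spelled out (illustrative): a slot
 * is re-traced on a minor GC if it was explicitly remembered by the write
 * barrier, or if it is an uncollectible WB-unprotected object (which can
 * gain references without a barrier and so must always be re-traced):
 *
 *     bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
 *
 * remembered_bits are consumed (zeroed) in the same pass. */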
6013
6014static void
6015rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
6016{
6017 struct heap_page *page = 0;
6018
6019 ccan_list_for_each(&heap->pages, page, page_node) {
6020 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6021 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6022 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6023 memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6024 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6025 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
6026 page->flags.has_remembered_objects = FALSE;
6027 }
6028}
6029
6030/* RGENGC: APIs */
6031
6032NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6033
6034static void
6035gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6036{
6037 if (RGENGC_CHECK_MODE) {
6038 if (!RVALUE_OLD_P(objspace, a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", rb_obj_info(a));
6039 if ( RVALUE_OLD_P(objspace, b)) rb_bug("gc_writebarrier_generational: %s is an old object.", rb_obj_info(b));
6040 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", rb_obj_info(a), rb_obj_info(b));
6041 }
6042
6043 /* mark `a' and remember (default behavior) */
6044 if (!RVALUE_REMEMBERED(objspace, a)) {
6045 int lev = RB_GC_VM_LOCK_NO_BARRIER();
6046 {
6047 rgengc_remember(objspace, a);
6048 }
6049 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
6050
6051 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", rb_obj_info(a), rb_obj_info(b));
6052 }
6053
6054 check_rvalue_consistency(objspace, a);
6055 check_rvalue_consistency(objspace, b);
6056}
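/* Example of when the generational barrier fires (illustrative): an old
 * array gains a reference to a freshly allocated, young string:
 *
 *     RARRAY_ASET(old_ary, 0, rb_str_new_cstr("young"));
 *
 * Without remembering old_ary, the next minor GC would skip tracing it
 * (old objects are not re-traced on minor GC) and could free the young
 * string while it is still reachable. Remembering old_ary makes
 * rgengc_rememberset_mark treat it as a root on the next minor GC. */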
6057
6058static void
6059gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6060{
6061 gc_mark_set_parent(objspace, parent);
6062 rgengc_check_relation(objspace, obj);
6063 if (gc_mark_set(objspace, obj) != FALSE) {
6064 gc_aging(objspace, obj);
6065 gc_grey(objspace, obj);
6066 }
6067 gc_mark_set_parent_invalid(objspace);
6068}
6069
6070NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6071
6072static void
6073gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6074{
6075 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, rb_obj_info(b));
6076
6077 if (RVALUE_BLACK_P(objspace, a)) {
6078 if (RVALUE_WHITE_P(objspace, b)) {
6079 if (!RVALUE_WB_UNPROTECTED(objspace, a)) {
6080 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, rb_obj_info(b));
6081 gc_mark_from(objspace, b, a);
6082 }
6083 }
6084 else if (RVALUE_OLD_P(objspace, a) && !RVALUE_OLD_P(objspace, b)) {
6085 rgengc_remember(objspace, a);
6086 }
6087
6088 if (RB_UNLIKELY(objspace->flags.during_compacting)) {
6089 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
6090 }
6091 }
6092}
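/* Tri-color sketch of the cases above (informal): a black object (marked,
 * children already traced) must not point at a white (unmarked) one, or the
 * white object would be swept while reachable. The first branch restores the
 * invariant by greying `b`:
 *
 *     if (BLACK(a) && WHITE(b)) grey(b);   // via gc_mark_from
 *
 * The second branch re-remembers an old `a` that gained a young (already
 * grey or black) reference, and the final pin conservatively keeps `b` from
 * being moved during a compacting GC. */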
6093
6094void
6095rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
6096{
6097 rb_objspace_t *objspace = objspace_ptr;
6098
6099#if RGENGC_CHECK_MODE
6100 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
6101 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
6102#else
6105#endif
6106
6107 GC_ASSERT(!during_gc);
6108 GC_ASSERT(RB_BUILTIN_TYPE(a) != T_NONE);
6109 GC_ASSERT(RB_BUILTIN_TYPE(a) != T_MOVED);
6110 GC_ASSERT(RB_BUILTIN_TYPE(a) != T_ZOMBIE);
6111 GC_ASSERT(RB_BUILTIN_TYPE(b) != T_NONE);
6112 GC_ASSERT(RB_BUILTIN_TYPE(b) != T_MOVED);
6113 GC_ASSERT(RB_BUILTIN_TYPE(b) != T_ZOMBIE);
6114
6115 retry:
6116 if (!is_incremental_marking(objspace)) {
6117 if (!RVALUE_OLD_P(objspace, a) || RVALUE_OLD_P(objspace, b)) {
6118 // do nothing
6119 }
6120 else {
6121 gc_writebarrier_generational(a, b, objspace);
6122 }
6123 }
6124 else {
6125 bool retry = false;
6126 /* slow path */
6127 int lev = RB_GC_VM_LOCK_NO_BARRIER();
6128 {
6129 if (is_incremental_marking(objspace)) {
6130 gc_writebarrier_incremental(a, b, objspace);
6131 }
6132 else {
6133 retry = true;
6134 }
6135 }
6136 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
6137
6138 if (retry) goto retry;
6139 }
6140 return;
6141}
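/* How this entry point is reached (illustrative; `wrapper` is a hypothetical
 * extension struct): C code does not call the barrier directly. Stores go
 * through RB_OBJ_WRITE, which performs the assignment and then calls
 * rb_gc_writebarrier(old, young), which dispatches here:
 *
 *     struct wrapper { VALUE child; };
 *     struct wrapper *w = DATA_PTR(self);
 *     RB_OBJ_WRITE(self, &w->child, value);   // store + barrier in one step
 */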
6142
6143void
6144rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
6145{
6146 rb_objspace_t *objspace = objspace_ptr;
6147
6148 if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
6149 return;
6150 }
6151 else {
6152 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", rb_obj_info(obj),
6153 RVALUE_REMEMBERED(objspace, obj) ? " (already remembered)" : "");
6154
6155 unsigned int lev = RB_GC_VM_LOCK_NO_BARRIER();
6156 {
6157 if (RVALUE_OLD_P(objspace, obj)) {
6158 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", rb_obj_info(obj));
6159 RVALUE_DEMOTE(objspace, obj);
6160 gc_mark_set(objspace, obj);
6161 gc_remember_unprotected(objspace, obj);
6162
6163#if RGENGC_PROFILE
6164 objspace->profile.total_shade_operation_count++;
6165#if RGENGC_PROFILE >= 2
6166 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6167#endif /* RGENGC_PROFILE >= 2 */
6168#endif /* RGENGC_PROFILE */
6169 }
6170 else {
6171 RVALUE_AGE_RESET(obj);
6172 }
6173
6174 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6175 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6176 }
6177 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
6178 }
6179}
6180
6181void
6182rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
6183{
6184 rb_objspace_t *objspace = objspace_ptr;
6185
6186 if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
6187 rb_gc_impl_writebarrier_unprotect(objspace, dest);
6188 }
6189 rb_gc_impl_copy_finalizer(objspace, dest, obj);
6190}
6191
6192const char *
6193rb_gc_impl_active_gc_name(void)
6194{
6195 return "default";
6196}
6197
6198void
6199rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
6200{
6201 rb_objspace_t *objspace = objspace_ptr;
6202
6203 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", rb_obj_info(obj));
6204
6205 if (is_incremental_marking(objspace) || RVALUE_OLD_P(objspace, obj)) {
6206 int lev = RB_GC_VM_LOCK_NO_BARRIER();
6207 {
6208 if (is_incremental_marking(objspace)) {
6209 if (RVALUE_BLACK_P(objspace, obj)) {
6210 gc_grey(objspace, obj);
6211 }
6212 }
6213 else if (RVALUE_OLD_P(objspace, obj)) {
6214 rgengc_remember(objspace, obj);
6215 }
6216 }
6217 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
6218 }
6219}
6220
6221struct rb_gc_object_metadata_names {
6222    // Must be ID only
6223 ID ID_wb_protected, ID_age, ID_old, ID_uncollectible, ID_marking,
6224 ID_marked, ID_pinned, ID_remembered, ID_object_id, ID_shareable;
6225};
6226
6227#define RB_GC_OBJECT_METADATA_ENTRY_COUNT (sizeof(struct rb_gc_object_metadata_names) / sizeof(ID))
6228static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
6229
6230struct rb_gc_object_metadata_entry *
6231rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
6232{
6233 rb_objspace_t *objspace = objspace_ptr;
6234 size_t n = 0;
6235 static struct rb_gc_object_metadata_names names;
6236
6237 if (!names.ID_marked) {
6238#define I(s) names.ID_##s = rb_intern(#s)
6239 I(wb_protected);
6240 I(age);
6241 I(old);
6242 I(uncollectible);
6243 I(marking);
6244 I(marked);
6245 I(pinned);
6246 I(remembered);
6247 I(object_id);
6248 I(shareable);
6249#undef I
6250 }
6251
6252#define SET_ENTRY(na, v) do { \
6253 GC_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
6254 object_metadata_entries[n].name = names.ID_##na; \
6255 object_metadata_entries[n].val = v; \
6256 n++; \
6257} while (0)
6258
6259 if (!RVALUE_WB_UNPROTECTED(objspace, obj)) SET_ENTRY(wb_protected, Qtrue);
6260 SET_ENTRY(age, INT2FIX(RVALUE_AGE_GET(obj)));
6261 if (RVALUE_OLD_P(objspace, obj)) SET_ENTRY(old, Qtrue);
6262 if (RVALUE_UNCOLLECTIBLE(objspace, obj)) SET_ENTRY(uncollectible, Qtrue);
6263 if (RVALUE_MARKING(objspace, obj)) SET_ENTRY(marking, Qtrue);
6264 if (RVALUE_MARKED(objspace, obj)) SET_ENTRY(marked, Qtrue);
6265 if (RVALUE_PINNED(objspace, obj)) SET_ENTRY(pinned, Qtrue);
6266 if (RVALUE_REMEMBERED(objspace, obj)) SET_ENTRY(remembered, Qtrue);
6267 if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
6268 if (FL_TEST(obj, FL_SHAREABLE)) SET_ENTRY(shareable, Qtrue);
6269
6270 object_metadata_entries[n].name = 0;
6271 object_metadata_entries[n].val = 0;
6272#undef SET_ENTRY
6273
6274 return object_metadata_entries;
6275}
6276
6277void *
6278rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
6279{
6280 rb_objspace_t *objspace = objspace_ptr;
6281
6282 objspace->live_ractor_cache_count++;
6283
6284 return calloc1(sizeof(rb_ractor_newobj_cache_t));
6285}
6286
6287void
6288rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
6289{
6290 rb_objspace_t *objspace = objspace_ptr;
6291
6292 objspace->live_ractor_cache_count--;
6293 gc_ractor_newobj_cache_clear(cache, NULL);
6294 free(cache);
6295}
6296
6297static void
6298heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
6299{
6300 if (!heap->free_pages) {
6301 if (!heap_page_allocate_and_initialize(objspace, heap)) {
6302 objspace->heap_pages.allocatable_bytes = HEAP_PAGE_SIZE;
6303 heap_page_allocate_and_initialize(objspace, heap);
6304 }
6305 }
6306}
6307
6308static int
6309ready_to_gc(rb_objspace_t *objspace)
6310{
6311 if (dont_gc_val() || during_gc) {
6312 for (int i = 0; i < HEAP_COUNT; i++) {
6313 rb_heap_t *heap = &heaps[i];
6314 heap_ready_to_gc(objspace, heap);
6315 }
6316 return FALSE;
6317 }
6318 else {
6319 return TRUE;
6320 }
6321}
6322
6323static void
6324gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
6325{
6326 gc_prof_set_malloc_info(objspace);
6327 {
6328 size_t inc = RUBY_ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
6329 size_t old_limit = malloc_limit;
6330
6331 if (inc > malloc_limit) {
6332 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
6333 if (malloc_limit > gc_params.malloc_limit_max) {
6334 malloc_limit = gc_params.malloc_limit_max;
6335 }
6336 }
6337 else {
6338 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
6339 if (malloc_limit < gc_params.malloc_limit_min) {
6340 malloc_limit = gc_params.malloc_limit_min;
6341 }
6342 }
6343
6344 if (0) {
6345 if (old_limit != malloc_limit) {
6346 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
6347 rb_gc_count(), old_limit, malloc_limit);
6348 }
6349 else {
6350 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
6351 rb_gc_count(), malloc_limit);
6352 }
6353 }
6354 }
6355
6356 /* reset oldmalloc info */
6357#if RGENGC_ESTIMATE_OLDMALLOC
6358 if (!full_mark) {
6359 if (objspace->malloc_counters.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
6360 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
6361 objspace->rgengc.oldmalloc_increase_limit =
6362 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
6363
6364 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
6365 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
6366 }
6367 }
6368
6369 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
6370 rb_gc_count(),
6371 gc_needs_major_flags,
6372 objspace->malloc_counters.oldmalloc_increase,
6373 objspace->rgengc.oldmalloc_increase_limit,
6374 gc_params.oldmalloc_limit_max);
6375 }
6376 else {
6377 /* major GC */
6378 objspace->malloc_counters.oldmalloc_increase = 0;
6379
6380 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
6381 objspace->rgengc.oldmalloc_increase_limit =
6382 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
6383 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
6384 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
6385 }
6386 }
6387 }
6388#endif
6389}
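/* Worked example of the adjustment above (assuming the documented defaults,
 * GC_MALLOC_LIMIT_MIN = 16 MiB, GC_MALLOC_LIMIT_MAX = 32 MiB, growth factor
 * 1.4; the actual tuning comes from gc_params and may differ): with
 * malloc_limit = 24 MiB, a cycle that malloc'd inc = 30 MiB raises the limit
 * to min(30 * 1.4, 32) = 32 MiB (capped), while a quiet cycle decays it to
 * 24 * 0.98 = 23.5 MiB, never dropping below the 16 MiB floor. */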
6390
6391static int
6392garbage_collect(rb_objspace_t *objspace, unsigned int reason)
6393{
6394 int ret;
6395
6396 int lev = RB_GC_VM_LOCK();
6397 {
6398#if GC_PROFILE_MORE_DETAIL
6399 objspace->profile.prepare_time = getrusage_time();
6400#endif
6401
6402 gc_rest(objspace);
6403
6404#if GC_PROFILE_MORE_DETAIL
6405 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
6406#endif
6407
6408 ret = gc_start(objspace, reason);
6409 }
6410 RB_GC_VM_UNLOCK(lev);
6411
6412 return ret;
6413}
6414
6415static int
6416gc_start(rb_objspace_t *objspace, unsigned int reason)
6417{
6418 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
6419
6420 if (!rb_darray_size(objspace->heap_pages.sorted)) return TRUE; /* heap is not ready */
6421 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
6422
6423 rb_gc_initialize_vm_context(&objspace->vm_context);
6424
6425 GC_ASSERT(gc_mode(objspace) == gc_mode_none, "gc_mode is %s\n", gc_mode_name(gc_mode(objspace)));
6426 GC_ASSERT(!is_lazy_sweeping(objspace));
6427 GC_ASSERT(!is_incremental_marking(objspace));
6428
6429 unsigned int lock_lev;
6430 gc_enter(objspace, gc_enter_event_start, &lock_lev);
6431
6432    /* reason may be clobbered later, so set immediate_sweep here */
6433 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
6434
6435#if RGENGC_CHECK_MODE >= 2
6436 gc_verify_internal_consistency(objspace);
6437#endif
6438
6439 if (ruby_gc_stressful) {
6440 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
6441
6442 if ((flag & (1 << gc_stress_no_major)) == 0) {
6443 do_full_mark = TRUE;
6444 }
6445
6446 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
6447 }
6448
6449 if (gc_needs_major_flags) {
6450 reason |= gc_needs_major_flags;
6451 do_full_mark = TRUE;
6452 }
6453
6454 /* if major gc has been disabled, never do a full mark */
6455 if (!gc_config_full_mark_val) {
6456 do_full_mark = FALSE;
6457 }
6458 gc_needs_major_flags = GPR_FLAG_NONE;
6459
6460 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
6461 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
6462 }
6463
6464 if (objspace->flags.dont_incremental ||
6465 reason & GPR_FLAG_IMMEDIATE_MARK ||
6466 ruby_gc_stressful) {
6467 objspace->flags.during_incremental_marking = FALSE;
6468 }
6469 else {
6470 objspace->flags.during_incremental_marking = do_full_mark;
6471 }
6472
6473    /* Enable compaction: automatic on full marks when autocompact is on, otherwise only when explicitly requested (GC.compact) */
6474 if (do_full_mark && ruby_enable_autocompact) {
6475 objspace->flags.during_compacting = TRUE;
6476#if RGENGC_CHECK_MODE
6477 objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
6478#endif
6479 }
6480 else {
6481 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
6482 }
6483
6484 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
6485 objspace->flags.immediate_sweep = TRUE;
6486 }
6487
6488 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
6489
6490 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
6491 reason,
6492 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
6493
6494 RB_DEBUG_COUNTER_INC(gc_count);
6495
6496 if (reason & GPR_FLAG_MAJOR_MASK) {
6497 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
6498 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
6499 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
6500 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
6501#if RGENGC_ESTIMATE_OLDMALLOC
6502 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
6503#endif
6504 }
6505 else {
6506 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
6507 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
6508 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
6509 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
6510 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
6511 }
6512
6513 objspace->profile.count++;
6514 objspace->profile.latest_gc_info = reason;
6515 objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
6516 objspace->profile.heap_used_at_gc_start = rb_darray_size(objspace->heap_pages.sorted);
6517 objspace->profile.heap_total_slots_at_gc_start = objspace_available_slots(objspace);
6518 objspace->profile.weak_references_count = 0;
6519 gc_prof_setup_new_record(objspace, reason);
6520 gc_reset_malloc_info(objspace, do_full_mark);
6521
6522 rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);
6523
6524 GC_ASSERT(during_gc);
6525
6526 gc_prof_timer_start(objspace);
6527 {
6528 if (gc_marks(objspace, do_full_mark)) {
6529 gc_sweep(objspace);
6530 }
6531 }
6532 gc_prof_timer_stop(objspace);
6533
6534 gc_exit(objspace, gc_enter_event_start, &lock_lev);
6535 return TRUE;
6536}
6537
6538static void
6539gc_rest(rb_objspace_t *objspace)
6540{
6541 if (is_incremental_marking(objspace) || is_lazy_sweeping(objspace)) {
6542 unsigned int lock_lev;
6543 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
6544
6545 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
6546
6547 if (is_incremental_marking(objspace)) {
6548 gc_marking_enter(objspace);
6549 gc_marks_rest(objspace);
6550 gc_marking_exit(objspace);
6551
6552 gc_sweep(objspace);
6553 }
6554
6555 if (is_lazy_sweeping(objspace)) {
6556 gc_sweeping_enter(objspace);
6557 gc_sweep_rest(objspace);
6558 gc_sweeping_exit(objspace);
6559 }
6560
6561 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
6562 }
6563}
6564
6565struct objspace_and_reason {
6566    rb_objspace_t *objspace;
6567    unsigned int reason;
6568};
6569
6570static void
6571gc_current_status_fill(rb_objspace_t *objspace, char *buff)
6572{
6573 int i = 0;
6574 if (is_marking(objspace)) {
6575 buff[i++] = 'M';
6576 if (is_full_marking(objspace)) buff[i++] = 'F';
6577 if (is_incremental_marking(objspace)) buff[i++] = 'I';
6578 }
6579 else if (is_sweeping(objspace)) {
6580 buff[i++] = 'S';
6581 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
6582 }
6583 else {
6584 buff[i++] = 'N';
6585 }
6586 buff[i] = '\0';
6587}
6588
6589static const char *
6590gc_current_status(rb_objspace_t *objspace)
6591{
6592 static char buff[0x10];
6593 gc_current_status_fill(objspace, buff);
6594 return buff;
6595}
6596
6597#if PRINT_ENTER_EXIT_TICK
6598
6599static tick_t last_exit_tick;
6600static tick_t enter_tick;
6601static int enter_count = 0;
6602static char last_gc_status[0x10];
6603
6604static inline void
6605gc_record(rb_objspace_t *objspace, int direction, const char *event)
6606{
6607 if (direction == 0) { /* enter */
6608 enter_count++;
6609 enter_tick = tick();
6610 gc_current_status_fill(objspace, last_gc_status);
6611 }
6612 else { /* exit */
6613 tick_t exit_tick = tick();
6614 char current_gc_status[0x10];
6615 gc_current_status_fill(objspace, current_gc_status);
6616#if 1
6617 /* [last mutator time] [gc time] [event] */
6618 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6619 enter_tick - last_exit_tick,
6620 exit_tick - enter_tick,
6621 event,
6622 last_gc_status, current_gc_status,
6623 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6624 last_exit_tick = exit_tick;
6625#else
6626 /* [enter_tick] [gc time] [event] */
6627 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6628 enter_tick,
6629 exit_tick - enter_tick,
6630 event,
6631 last_gc_status, current_gc_status,
6632 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6633#endif
6634 }
6635}
6636#else /* PRINT_ENTER_EXIT_TICK */
6637static inline void
6638gc_record(rb_objspace_t *objspace, int direction, const char *event)
6639{
6640 /* null */
6641}
6642#endif /* PRINT_ENTER_EXIT_TICK */
6643
6644static const char *
6645gc_enter_event_cstr(enum gc_enter_event event)
6646{
6647 switch (event) {
6648 case gc_enter_event_start: return "start";
6649 case gc_enter_event_continue: return "continue";
6650 case gc_enter_event_rest: return "rest";
6651 case gc_enter_event_finalizer: return "finalizer";
6652 }
6653 return NULL;
6654}
6655
6656static void
6657gc_enter_count(enum gc_enter_event event)
6658{
6659 switch (event) {
6660 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
6661 case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
6662 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
6663 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
6664 }
6665}
6666
6667static bool current_process_time(struct timespec *ts);
6668
6669static void
6670gc_clock_start(struct timespec *ts)
6671{
6672 if (!current_process_time(ts)) {
6673 ts->tv_sec = 0;
6674 ts->tv_nsec = 0;
6675 }
6676}
6677
6678static unsigned long long
6679gc_clock_end(struct timespec *ts)
6680{
6681 struct timespec end_time;
6682
6683 if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
6684 current_process_time(&end_time) &&
6685 end_time.tv_sec >= ts->tv_sec) {
6686 return (unsigned long long)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
6687 (end_time.tv_nsec - ts->tv_nsec);
6688 }
6689
6690 return 0;
6691}
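/* Example (illustrative): start = {tv_sec = 2, tv_nsec = 900000000} and
 * end = {tv_sec = 4, tv_nsec = 100000000} yield
 * (4 - 2) * 1000000000 + (100000000 - 900000000) = 1200000000 ns; the
 * negative nanosecond difference wraps correctly under unsigned arithmetic
 * because the true total is non-negative. */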
6692
6693static inline void
6694gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
6695{
6696 *lock_lev = RB_GC_VM_LOCK();
6697
6698 switch (event) {
6699 case gc_enter_event_rest:
6700 case gc_enter_event_start:
6701 case gc_enter_event_continue:
6702 // stop other ractors
6703 rb_gc_vm_barrier();
6704 break;
6705 default:
6706 break;
6707 }
6708
6709 gc_enter_count(event);
6710 if (RB_UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
6711 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
6712
6713 during_gc = TRUE;
6714    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
6715 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
6716 gc_record(objspace, 0, gc_enter_event_cstr(event));
6717
6718 rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_ENTER);
6719}
6720
6721static inline void
6722gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
6723{
6724 GC_ASSERT(during_gc != 0);
6725
6726 rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_EXIT);
6727
6728 gc_record(objspace, 1, gc_enter_event_cstr(event));
6729 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
6730 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
6731 during_gc = FALSE;
6732
6733 RB_GC_VM_UNLOCK(*lock_lev);
6734}
6735
6736#ifndef MEASURE_GC
6737#define MEASURE_GC (objspace->flags.measure_gc)
6738#endif
6739
6740static void
6741gc_marking_enter(rb_objspace_t *objspace)
6742{
6743 GC_ASSERT(during_gc != 0);
6744
6745 if (MEASURE_GC) {
6746 gc_clock_start(&objspace->profile.marking_start_time);
6747 }
6748
6749 rb_gc_initialize_vm_context(&objspace->vm_context);
6750}
6751
6752static void
6753gc_marking_exit(rb_objspace_t *objspace)
6754{
6755 GC_ASSERT(during_gc != 0);
6756
6757 if (MEASURE_GC) {
6758 objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
6759 }
6760}
6761
6762static void
6763gc_sweeping_enter(rb_objspace_t *objspace)
6764{
6765 GC_ASSERT(during_gc != 0);
6766
6767 if (MEASURE_GC) {
6768 gc_clock_start(&objspace->profile.sweeping_start_time);
6769 }
6770}
6771
6772static void
6773gc_sweeping_exit(rb_objspace_t *objspace)
6774{
6775 GC_ASSERT(during_gc != 0);
6776
6777 if (MEASURE_GC) {
6778 objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
6779 }
6780}
6781
6782static void *
6783gc_with_gvl(void *ptr)
6784{
6785 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
6786 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
6787}
6788
6789int ruby_thread_has_gvl_p(void);
6790
6791static int
6792garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
6793{
6794 if (dont_gc_val()) {
6795 return TRUE;
6796 }
6797 else if (!ruby_native_thread_p()) {
6798 return TRUE;
6799 }
6800 else if (!ruby_thread_has_gvl_p()) {
6801 void *ret;
6802 struct objspace_and_reason oar;
6803 oar.objspace = objspace;
6804 oar.reason = reason;
6805 ret = rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
6806
6807 return !!ret;
6808 }
6809 else {
6810 return garbage_collect(objspace, reason);
6811 }
6812}
6813
6814static int
6815gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
6816{
6817    rb_objspace_t *objspace = data;
6818
6819 VALUE v = (VALUE)vstart;
6820 for (; v != (VALUE)vend; v += stride) {
6821 asan_unpoisoning_object(v) {
6822 switch (BUILTIN_TYPE(v)) {
6823 case T_NONE:
6824 case T_ZOMBIE:
6825 break;
6826 default:
6827 rb_gc_prepare_heap_process_object(v);
6828 if (!RVALUE_OLD_P(objspace, v) && !RVALUE_WB_UNPROTECTED(objspace, v)) {
6829 RVALUE_AGE_SET_CANDIDATE(objspace, v);
6830 }
6831 }
6832 }
6833 }
6834
6835 return 0;
6836}
6837
6838void
6839rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
6840{
6841 rb_objspace_t *objspace = objspace_ptr;
6842 unsigned int reason = (GPR_FLAG_FULL_MARK |
6843 GPR_FLAG_IMMEDIATE_MARK |
6844 GPR_FLAG_IMMEDIATE_SWEEP |
6845 GPR_FLAG_METHOD);
6846
6847 int full_marking_p = gc_config_full_mark_val;
6848 gc_config_full_mark_set(TRUE);
6849
6850 /* For now, compact implies full mark / sweep, so ignore other flags */
6851 if (compact) {
6852 GC_ASSERT(GC_COMPACTION_SUPPORTED);
6853
6854 reason |= GPR_FLAG_COMPACT;
6855 }
6856 else {
6857 if (!full_mark) reason &= ~GPR_FLAG_FULL_MARK;
6858 if (!immediate_mark) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
6859 if (!immediate_sweep) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
6860 }
6861
6862 garbage_collect(objspace, reason);
6863 gc_finalize_deferred(objspace);
6864
6865 gc_config_full_mark_set(full_marking_p);
6866}
6867
6868void
6869rb_gc_impl_prepare_heap(void *objspace_ptr)
6870{
6871 rb_objspace_t *objspace = objspace_ptr;
6872
6873 size_t orig_total_slots = objspace_available_slots(objspace);
6874 size_t orig_allocatable_bytes = objspace->heap_pages.allocatable_bytes;
6875
6876 rb_gc_impl_each_objects(objspace, gc_set_candidate_object_i, objspace_ptr);
6877
6878 double orig_max_free_slots = gc_params.heap_free_slots_max_ratio;
6879 /* Ensure that all empty pages are moved onto empty_pages. */
6880 gc_params.heap_free_slots_max_ratio = 0.0;
6881 rb_gc_impl_start(objspace, true, true, true, true);
6882 gc_params.heap_free_slots_max_ratio = orig_max_free_slots;
6883
6884 objspace->heap_pages.allocatable_bytes = 0;
6885 heap_pages_freeable_pages = objspace->empty_pages_count;
6886 heap_pages_free_unused_pages(objspace_ptr);
6887 GC_ASSERT(heap_pages_freeable_pages == 0);
6888 GC_ASSERT(objspace->empty_pages_count == 0);
6889 objspace->heap_pages.allocatable_bytes = orig_allocatable_bytes;
6890
6891 size_t total_slots = objspace_available_slots(objspace);
6892 if (orig_total_slots > total_slots) {
6893 objspace->heap_pages.allocatable_bytes += (orig_total_slots - total_slots) * heaps[0].slot_size;
6894 }
6895
6896#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
6897 malloc_trim(0);
6898#endif
6899}
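/* The ratio dance above, spelled out (informal): heap_free_slots_max_ratio
 * normally makes sweeping keep a cushion of free pages per heap. Forcing it
 * to 0.0 for one compacting GC routes every fully emptied page onto
 * empty_pages, so heap_pages_free_unused_pages can return all of them to the
 * system; allocatable_bytes is restored afterwards so the pre-existing
 * growth budget is unchanged. */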
6900
6901static int
6902gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
6903{
6904 GC_ASSERT(!SPECIAL_CONST_P(obj));
6905
6906 switch (BUILTIN_TYPE(obj)) {
6907 case T_NONE:
6908 case T_MOVED:
6909 case T_ZOMBIE:
6910 return FALSE;
6911 case T_SYMBOL:
6912 case T_STRING:
6913 case T_OBJECT:
6914 case T_FLOAT:
6915 case T_IMEMO:
6916 case T_ARRAY:
6917 case T_BIGNUM:
6918 case T_ICLASS:
6919 case T_MODULE:
6920 case T_REGEXP:
6921 case T_DATA:
6922 case T_MATCH:
6923 case T_STRUCT:
6924 case T_HASH:
6925 case T_FILE:
6926 case T_COMPLEX:
6927 case T_RATIONAL:
6928 case T_NODE:
6929 case T_CLASS:
6930 if (FL_TEST_RAW(obj, FL_FINALIZE)) {
6931 /* The finalizer table is a numtable. It looks up objects by address.
6932 * We can't mark the keys in the finalizer table because that would
6933 * prevent the objects from being collected. This check prevents
6934 * objects that are keys in the finalizer table from being moved
6935 * without directly pinning them. */
6936 GC_ASSERT(st_is_member(finalizer_table, obj));
6937
6938 return FALSE;
6939 }
6940 GC_ASSERT(RVALUE_MARKED(objspace, obj));
6941 GC_ASSERT(!RVALUE_PINNED(objspace, obj));
6942
6943 return TRUE;
6944
6945 default:
6946 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
6947 break;
6948 }
6949
6950 return FALSE;
6951}
6952
6953void rb_mv_generic_ivar(VALUE src, VALUE dst);
6954
6955static VALUE
6956gc_move(rb_objspace_t *objspace, VALUE src, VALUE dest, struct heap_page *src_page, struct heap_page *dest_page)
6957{
6958 size_t src_slot_size = src_page->slot_size;
6959 size_t slot_size = dest_page->slot_size;
6960
6961 int marked;
6962 int wb_unprotected;
6963 int uncollectible;
6964 int age;
6965
6966 gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)src, (void *)dest);
6967
6968 GC_ASSERT(BUILTIN_TYPE(src) != T_NONE);
6969 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
6970
6971 GC_ASSERT(!RVALUE_MARKING(objspace, src));
6972
6973 /* Save off bits for current object. */
6974 marked = RVALUE_MARKED(objspace, src);
6975 wb_unprotected = RVALUE_WB_UNPROTECTED(objspace, src);
6976 uncollectible = RVALUE_UNCOLLECTIBLE(objspace, src);
6977 bool remembered = RVALUE_REMEMBERED(objspace, src);
6978 age = RVALUE_AGE_GET(src);
6979
6980 /* Clear bits for eventual T_MOVED */
6981 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(src), src);
6982 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(src), src);
6983 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(src), src);
6984 CLEAR_IN_BITMAP(GET_HEAP_PAGE(src)->remembered_bits, src);
6985
6986 /* Move the object */
6987 memcpy((void *)dest, (void *)src, MIN(src_slot_size, slot_size));
6988
6989 if (src_slot_size != slot_size && RB_TYPE_P(src, T_OBJECT)) {
6990 rb_gc_obj_changed_pool(dest, dest_page->heap - heaps);
6991 }
6992
6993 if (RVALUE_OVERHEAD > 0) {
6994 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
6995 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
6996
6997 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
6998 }
6999
7000 memset((void *)src, 0, src_slot_size);
7001 RVALUE_AGE_SET_BITMAP(src, 0);
7002
7003 /* Set bits for object in new location */
7004 if (remembered) {
7005 MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);
7006 }
7007 else {
7008 CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);
7009 }
7010
7011 if (marked) {
7012 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);
7013 }
7014 else {
7015 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);
7016 }
7017
7018 if (wb_unprotected) {
7019 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
7020 }
7021 else {
7022 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
7023 }
7024
7025 if (uncollectible) {
7026 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
7027 }
7028 else {
7029 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
7030 }
7031
7032 RVALUE_AGE_SET(dest, age);
7033 /* Assign forwarding address */
7034 RMOVED(src)->flags = T_MOVED;
7035 RMOVED(src)->dummy = Qundef;
7036 RMOVED(src)->destination = dest;
7037 GC_ASSERT(BUILTIN_TYPE(dest) != T_NONE);
7038
7039 GET_HEAP_PAGE(src)->heap->total_freed_objects++;
7040 GET_HEAP_PAGE(dest)->heap->total_allocated_objects++;
7041
7042 return src;
7043}
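/* After gc_move the source slot is a forwarding stub (illustrative layout):
 *
 *     RMOVED(src) == { .flags = T_MOVED, .dummy = Qundef, .destination = dest }
 *
 * Any pointer still naming `src` can be redirected by the later reference
 * updating pass via rb_gc_location(src), which returns `dest`; the stubs
 * themselves are reclaimed when compaction finishes. */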
7044
7045#if GC_CAN_COMPILE_COMPACTION
7046static int
7047compare_pinned_slots(const void *left, const void *right, void *dummy)
7048{
7049 struct heap_page *left_page;
7050 struct heap_page *right_page;
7051
7052 left_page = *(struct heap_page * const *)left;
7053 right_page = *(struct heap_page * const *)right;
7054
7055 return left_page->pinned_slots - right_page->pinned_slots;
7056}
7057
7058static int
7059compare_free_slots(const void *left, const void *right, void *dummy)
7060{
7061 struct heap_page *left_page;
7062 struct heap_page *right_page;
7063
7064 left_page = *(struct heap_page * const *)left;
7065 right_page = *(struct heap_page * const *)right;
7066
7067 return left_page->free_slots - right_page->free_slots;
7068}
7069
7070static void
7071gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
7072{
7073 for (int j = 0; j < HEAP_COUNT; j++) {
7074 rb_heap_t *heap = &heaps[j];
7075
7076 size_t total_pages = heap->total_pages;
7077 size_t size = rb_size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
7078 struct heap_page *page = 0, **page_list = malloc(size);
7079 size_t i = 0;
7080
7081 heap->free_pages = NULL;
7082 ccan_list_for_each(&heap->pages, page, page_node) {
7083 page_list[i++] = page;
7084 GC_ASSERT(page);
7085 }
7086
7087 GC_ASSERT((size_t)i == total_pages);
7088
7089        /* Sort the page array so "filled pages" (fewest free slots) come first; since
7090         * each page is then re-added at the head of the list, mostly-empty pages end up at the start of the heap */
7091 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);
7092
7093 /* Reset the eden heap */
7094 ccan_list_head_init(&heap->pages);
7095
7096 for (i = 0; i < total_pages; i++) {
7097 ccan_list_add(&heap->pages, &page_list[i]->page_node);
7098 if (page_list[i]->free_slots != 0) {
7099 heap_add_freepage(heap, page_list[i]);
7100 }
7101 }
7102
7103 free(page_list);
7104 }
7105}
7106#endif
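/* Why this ordering matters (informal sketch): the compact cursor starts at
 * the tail of the rebuilt list, i.e. at the fullest pages, while destination
 * slots are taken from free_pages near the head. Objects are therefore
 * evacuated toward the mostly-empty head pages, and pages the cursor has
 * emptied can be released once sweeping completes. */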
7107
7108void
7109rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)
7110{
7111 /* no-op */
7112}
7113
7114bool
7115rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
7116{
7117 return gc_object_moved_p(objspace_ptr, obj);
7118}
7119
7120static int
7121gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t *objspace, struct heap_page *page)
7122{
7123 VALUE v = (VALUE)vstart;
7124
7125 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
7126 page->flags.has_remembered_objects = FALSE;
7127
7128 /* For each object on the page */
7129 for (; v != (VALUE)vend; v += stride) {
7130 asan_unpoisoning_object(v) {
7131 switch (BUILTIN_TYPE(v)) {
7132 case T_NONE:
7133 case T_MOVED:
7134 case T_ZOMBIE:
7135 break;
7136 default:
7137 if (RVALUE_WB_UNPROTECTED(objspace, v)) {
7138 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
7139 }
7140 if (RVALUE_REMEMBERED(objspace, v)) {
7141 page->flags.has_remembered_objects = TRUE;
7142 }
7143 if (page->flags.before_sweep) {
7144 if (RVALUE_MARKED(objspace, v)) {
7145 rb_gc_update_object_references(objspace, v);
7146 }
7147 }
7148 else {
7149 rb_gc_update_object_references(objspace, v);
7150 }
7151 }
7152 }
7153 }
7154
7155 return 0;
7156}
7157
7158static int
7159gc_update_references_weak_table_i(VALUE obj, void *data)
7160{
7161 int ret;
7162 asan_unpoisoning_object(obj) {
7163 ret = BUILTIN_TYPE(obj) == T_MOVED ? ST_REPLACE : ST_CONTINUE;
7164 }
7165 return ret;
7166}
7167
7168static int
7169gc_update_references_weak_table_replace_i(VALUE *obj, void *data)
7170{
7171 *obj = rb_gc_location(*obj);
7172
7173 return ST_CONTINUE;
7174}
7175
7176static void
7177gc_update_references(rb_objspace_t *objspace)
7178{
7179 objspace->flags.during_reference_updating = true;
7180
7181 rb_gc_before_updating_jit_code();
7182
7183 struct heap_page *page = NULL;
7184
7185 for (int i = 0; i < HEAP_COUNT; i++) {
7186 bool should_set_mark_bits = TRUE;
7187 rb_heap_t *heap = &heaps[i];
7188
7189 ccan_list_for_each(&heap->pages, page, page_node) {
7190 uintptr_t start = (uintptr_t)page->start;
7191 uintptr_t end = start + (page->total_slots * heap->slot_size);
7192
7193 gc_ref_update((void *)start, (void *)end, heap->slot_size, objspace, page);
7194 if (page == heap->sweeping_page) {
7195 should_set_mark_bits = FALSE;
7196 }
7197 if (should_set_mark_bits) {
7198 gc_setup_mark_bits(page);
7199 }
7200 }
7201 }
7202
7203 gc_update_table_refs(finalizer_table);
7204
7205 rb_gc_update_vm_references((void *)objspace);
7206
7207 for (int table = 0; table < RB_GC_VM_WEAK_TABLE_COUNT; table++) {
7208 rb_gc_vm_weak_table_foreach(
7209 gc_update_references_weak_table_i,
7210 gc_update_references_weak_table_replace_i,
7211 NULL,
7212 false,
7213 table
7214 );
7215 }
7216
7217 rb_gc_after_updating_jit_code();
7218
7219 objspace->flags.during_reference_updating = false;
7220}
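/* Flow of the weak-table pass above (illustrative): for each entry, the
 * first callback answers ST_REPLACE when the stored object is now a T_MOVED
 * stub, and the second rewrites the slot in place:
 *
 *     *obj = rb_gc_location(*obj);   // forwarding stub -> new address
 */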
7221
7222#if GC_CAN_COMPILE_COMPACTION
7223static void
7224root_obj_check_moved_i(const char *category, VALUE obj, void *data)
7225{
7226 rb_objspace_t *objspace = data;
7227
7228 if (gc_object_moved_p(objspace, obj)) {
7229 rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, rb_obj_info(rb_gc_impl_location(objspace, obj)));
7230 }
7231}
7232
7233static void
7234reachable_object_check_moved_i(VALUE ref, void *data)
7235{
7236 VALUE parent = (VALUE)data;
7237 if (gc_object_moved_p(rb_gc_get_objspace(), ref)) {
7238 rb_bug("Object %s points to MOVED: %p -> %s", rb_obj_info(parent), (void *)ref, rb_obj_info(rb_gc_impl_location(rb_gc_get_objspace(), ref)));
7239 }
7240}
7241
7242static int
7243heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
7244{
7245 rb_objspace_t *objspace = data;
7246
7247 VALUE v = (VALUE)vstart;
7248 for (; v != (VALUE)vend; v += stride) {
7249 if (gc_object_moved_p(objspace, v)) {
7250 /* Moved object still on the heap, something may have a reference. */
7251 }
7252 else {
7253 asan_unpoisoning_object(v) {
7254 switch (BUILTIN_TYPE(v)) {
7255 case T_NONE:
7256 case T_ZOMBIE:
7257 break;
7258 default:
7259 if (!rb_gc_impl_garbage_object_p(objspace, v)) {
7260 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
7261 }
7262 }
7263 }
7264 }
7265 }
7266
7267 return 0;
7268}
7269#endif
7270
7271bool
7272rb_gc_impl_during_gc_p(void *objspace_ptr)
7273{
7274 rb_objspace_t *objspace = objspace_ptr;
7275
7276 return during_gc;
7277}
7278
7279#if RGENGC_PROFILE >= 2
7280
7281static const char*
7282type_name(int type, VALUE obj)
7283{
7284 switch ((enum ruby_value_type)type) {
7285 case RUBY_T_NONE: return "T_NONE";
7286 case RUBY_T_OBJECT: return "T_OBJECT";
7287 case RUBY_T_CLASS: return "T_CLASS";
7288 case RUBY_T_MODULE: return "T_MODULE";
7289 case RUBY_T_FLOAT: return "T_FLOAT";
7290 case RUBY_T_STRING: return "T_STRING";
7291 case RUBY_T_REGEXP: return "T_REGEXP";
7292 case RUBY_T_ARRAY: return "T_ARRAY";
7293 case RUBY_T_HASH: return "T_HASH";
7294 case RUBY_T_STRUCT: return "T_STRUCT";
7295 case RUBY_T_BIGNUM: return "T_BIGNUM";
7296 case RUBY_T_FILE: return "T_FILE";
7297 case RUBY_T_DATA: return "T_DATA";
7298 case RUBY_T_MATCH: return "T_MATCH";
7299 case RUBY_T_COMPLEX: return "T_COMPLEX";
7300 case RUBY_T_RATIONAL: return "T_RATIONAL";
7301 case RUBY_T_NIL: return "T_NIL";
7302 case RUBY_T_TRUE: return "T_TRUE";
7303 case RUBY_T_FALSE: return "T_FALSE";
7304 case RUBY_T_SYMBOL: return "T_SYMBOL";
7305 case RUBY_T_FIXNUM: return "T_FIXNUM";
7306 case RUBY_T_UNDEF: return "T_UNDEF";
7307 case RUBY_T_IMEMO: return "T_IMEMO";
7308 case RUBY_T_NODE: return "T_NODE";
7309 case RUBY_T_ICLASS: return "T_ICLASS";
7310 case RUBY_T_ZOMBIE: return "T_ZOMBIE";
7311 case RUBY_T_MOVED: return "T_MOVED";
7312 default: return "unknown";
7313 }
7314}
7315
7316static void
7317gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
7318{
7319 VALUE result = rb_hash_new_with_size(T_MASK);
7320 int i;
7321 for (i=0; i<T_MASK; i++) {
7322 const char *type = type_name(i, 0);
7323 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
7324 }
7325 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
7326}
7327#endif
7328
7329size_t
7330rb_gc_impl_gc_count(void *objspace_ptr)
7331{
7332 rb_objspace_t *objspace = objspace_ptr;
7333
7334 return objspace->profile.count;
7335}
7336
7337static VALUE
7338gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
7339{
7340 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
7341 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
7342#if RGENGC_ESTIMATE_OLDMALLOC
7343 static VALUE sym_oldmalloc;
7344#endif
7345 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
7346 static VALUE sym_none, sym_marking, sym_sweeping;
7347 static VALUE sym_weak_references_count;
7348 VALUE hash = Qnil, key = Qnil;
7349 VALUE major_by, need_major_by;
7350 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
7351
7352 if (SYMBOL_P(hash_or_key)) {
7353 key = hash_or_key;
7354 }
7355 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
7356 hash = hash_or_key;
7357 }
7358 else {
7359 rb_bug("gc_info_decode: non-hash or symbol given");
7360 }
7361
7362 if (NIL_P(sym_major_by)) {
7363#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
7364 S(major_by);
7365 S(gc_by);
7366 S(immediate_sweep);
7367 S(have_finalizer);
7368 S(state);
7369 S(need_major_by);
7370
7371 S(stress);
7372 S(nofree);
7373 S(oldgen);
7374 S(shady);
7375 S(force);
7376#if RGENGC_ESTIMATE_OLDMALLOC
7377 S(oldmalloc);
7378#endif
7379 S(newobj);
7380 S(malloc);
7381 S(method);
7382 S(capi);
7383
7384 S(none);
7385 S(marking);
7386 S(sweeping);
7387
7388 S(weak_references_count);
7389#undef S
7390 }
7391
7392#define SET(name, attr) \
7393 if (key == sym_##name) \
7394 return (attr); \
7395 else if (hash != Qnil) \
7396 rb_hash_aset(hash, sym_##name, (attr));
7397
7398 major_by =
7399 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7400 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7401 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7402 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7403#if RGENGC_ESTIMATE_OLDMALLOC
7404 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7405#endif
7406 Qnil;
7407 SET(major_by, major_by);
7408
7409 if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
7410 unsigned int need_major_flags = gc_needs_major_flags;
7411 need_major_by =
7412 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7413 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7414 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7415 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7416#if RGENGC_ESTIMATE_OLDMALLOC
7417 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7418#endif
7419 Qnil;
7420 SET(need_major_by, need_major_by);
7421 }
7422
7423 SET(gc_by,
7424 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
7425 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
7426 (flags & GPR_FLAG_METHOD) ? sym_method :
7427 (flags & GPR_FLAG_CAPI) ? sym_capi :
7428 (flags & GPR_FLAG_STRESS) ? sym_stress :
7429 Qnil
7430 );
7431
7432 SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
7433 SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
7434
7435 if (orig_flags == 0) {
7436 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
7437 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
7438 }
7439
7440 SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
7441#undef SET
7442
7443 if (!NIL_P(key)) {
7444 // Matched key should return above
7445 return Qundef;
7446 }
7447
7448 return hash;
7449}
7450
7451VALUE
7452rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE key)
7453{
7454 rb_objspace_t *objspace = objspace_ptr;
7455
7456 return gc_info_decode(objspace, key, 0);
7457}
7458
7459
7460enum gc_stat_sym {
7461 gc_stat_sym_count,
7462 gc_stat_sym_time,
7463 gc_stat_sym_marking_time,
7464 gc_stat_sym_sweeping_time,
7465 gc_stat_sym_heap_allocated_pages,
7466 gc_stat_sym_heap_empty_pages,
7467 gc_stat_sym_heap_allocatable_bytes,
7468 gc_stat_sym_heap_available_slots,
7469 gc_stat_sym_heap_live_slots,
7470 gc_stat_sym_heap_free_slots,
7471 gc_stat_sym_heap_final_slots,
7472 gc_stat_sym_heap_marked_slots,
7473 gc_stat_sym_heap_eden_pages,
7474 gc_stat_sym_total_allocated_pages,
7475 gc_stat_sym_total_freed_pages,
7476 gc_stat_sym_total_allocated_objects,
7477 gc_stat_sym_total_freed_objects,
7478 gc_stat_sym_malloc_increase_bytes,
7479 gc_stat_sym_malloc_increase_bytes_limit,
7480 gc_stat_sym_minor_gc_count,
7481 gc_stat_sym_major_gc_count,
7482 gc_stat_sym_compact_count,
7483 gc_stat_sym_read_barrier_faults,
7484 gc_stat_sym_total_moved_objects,
7485 gc_stat_sym_remembered_wb_unprotected_objects,
7486 gc_stat_sym_remembered_wb_unprotected_objects_limit,
7487 gc_stat_sym_old_objects,
7488 gc_stat_sym_old_objects_limit,
7489#if RGENGC_ESTIMATE_OLDMALLOC
7490 gc_stat_sym_oldmalloc_increase_bytes,
7491 gc_stat_sym_oldmalloc_increase_bytes_limit,
7492#endif
7493#if RGENGC_PROFILE
7494 gc_stat_sym_total_generated_normal_object_count,
7495 gc_stat_sym_total_generated_shady_object_count,
7496 gc_stat_sym_total_shade_operation_count,
7497 gc_stat_sym_total_promoted_count,
7498 gc_stat_sym_total_remembered_normal_object_count,
7499 gc_stat_sym_total_remembered_shady_object_count,
7500#endif
7501 gc_stat_sym_last
7502};
7503
7504static VALUE gc_stat_symbols[gc_stat_sym_last];
7505
7506static void
7507setup_gc_stat_symbols(void)
7508{
7509 if (gc_stat_symbols[0] == 0) {
7510#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
7511 S(count);
7512 S(time);
7513        S(marking_time);
7514        S(sweeping_time);
7515 S(heap_allocated_pages);
7516 S(heap_empty_pages);
7517 S(heap_allocatable_bytes);
7518 S(heap_available_slots);
7519 S(heap_live_slots);
7520 S(heap_free_slots);
7521 S(heap_final_slots);
7522 S(heap_marked_slots);
7523 S(heap_eden_pages);
7524 S(total_allocated_pages);
7525 S(total_freed_pages);
7526 S(total_allocated_objects);
7527 S(total_freed_objects);
7528 S(malloc_increase_bytes);
7529 S(malloc_increase_bytes_limit);
7530 S(minor_gc_count);
7531 S(major_gc_count);
7532 S(compact_count);
7533 S(read_barrier_faults);
7534 S(total_moved_objects);
7535 S(remembered_wb_unprotected_objects);
7536 S(remembered_wb_unprotected_objects_limit);
7537 S(old_objects);
7538 S(old_objects_limit);
7539#if RGENGC_ESTIMATE_OLDMALLOC
7540 S(oldmalloc_increase_bytes);
7541 S(oldmalloc_increase_bytes_limit);
7542#endif
7543#if RGENGC_PROFILE
7544 S(total_generated_normal_object_count);
7545 S(total_generated_shady_object_count);
7546 S(total_shade_operation_count);
7547 S(total_promoted_count);
7548 S(total_remembered_normal_object_count);
7549 S(total_remembered_shady_object_count);
7550#endif /* RGENGC_PROFILE */
7551#undef S
7552 }
7553}
7554
7555static uint64_t
7556ns_to_ms(uint64_t ns)
7557{
7558 return ns / (1000 * 1000);
7559}
7560
7561static void malloc_increase_local_flush(rb_objspace_t *objspace);
7562
7563VALUE
7564rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
7565{
7566 rb_objspace_t *objspace = objspace_ptr;
7567 VALUE hash = Qnil, key = Qnil;
7568
7569 setup_gc_stat_symbols();
7570
7571 ractor_cache_flush_count(objspace, rb_gc_get_ractor_newobj_cache());
7572 malloc_increase_local_flush(objspace);
7573
7574 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7575 hash = hash_or_sym;
7576 }
7577 else if (SYMBOL_P(hash_or_sym)) {
7578 key = hash_or_sym;
7579 }
7580 else {
7581 rb_bug("non-hash or symbol given");
7582 }
7583
7584#define SET(name, attr) \
7585 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
7586 return SIZET2NUM(attr); \
7587 else if (hash != Qnil) \
7588 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
7589
7590 SET(count, objspace->profile.count);
7591 SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns)); // TODO: UINT64T2NUM
7592 SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
7593 SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
7594
7595 /* implementation dependent counters */
7596 SET(heap_allocated_pages, rb_darray_size(objspace->heap_pages.sorted));
7597    SET(heap_empty_pages, objspace->empty_pages_count);
7598 SET(heap_allocatable_bytes, objspace->heap_pages.allocatable_bytes);
7599 SET(heap_available_slots, objspace_available_slots(objspace));
7600 SET(heap_live_slots, objspace_live_slots(objspace));
7601 SET(heap_free_slots, objspace_free_slots(objspace));
7602 SET(heap_final_slots, total_final_slots_count(objspace));
7603 SET(heap_marked_slots, objspace->marked_slots);
7604 SET(heap_eden_pages, heap_eden_total_pages(objspace));
7605 SET(total_allocated_pages, objspace->heap_pages.allocated_pages);
7606 SET(total_freed_pages, objspace->heap_pages.freed_pages);
7607 SET(total_allocated_objects, total_allocated_objects(objspace));
7608 SET(total_freed_objects, total_freed_objects(objspace));
7609 SET(malloc_increase_bytes, malloc_increase);
7610 SET(malloc_increase_bytes_limit, malloc_limit);
7611 SET(minor_gc_count, objspace->profile.minor_gc_count);
7612 SET(major_gc_count, objspace->profile.major_gc_count);
7613 SET(compact_count, objspace->profile.compact_count);
7614 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
7615 SET(total_moved_objects, objspace->rcompactor.total_moved);
7616 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
7617 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
7618 SET(old_objects, objspace->rgengc.old_objects);
7619 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
7620#if RGENGC_ESTIMATE_OLDMALLOC
7621 SET(oldmalloc_increase_bytes, objspace->malloc_counters.oldmalloc_increase);
7622 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
7623#endif
7624
7625#if RGENGC_PROFILE
7626 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
7627 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
7628 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
7629 SET(total_promoted_count, objspace->profile.total_promoted_count);
7630 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
7631 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
7632#endif /* RGENGC_PROFILE */
7633#undef SET
7634
7635 if (!NIL_P(key)) {
7636 // Matched key should return above
7637 return Qundef;
7638 }
7639
7640#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
7641 if (hash != Qnil) {
7642 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
7643 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
7644 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
7645 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
7646 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
7647 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
7648 }
7649#endif
7650
7651 return hash;
7652}
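/* Ruby-level usage sketch (assuming the usual GC.stat binding to this entry
 * point; all values are illustrative):
 *
 *     GC.stat                    #=> {count: 12, time: 3, marking_time: 2, ...}
 *     GC.stat(:heap_live_slots)  #=> 24832
 *
 * A Symbol argument takes the `key` path in SET() and returns one Integer;
 * a Hash argument is filled in place and returned.
 */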
7653
7654enum gc_stat_heap_sym {
7655 gc_stat_heap_sym_slot_size,
7656 gc_stat_heap_sym_heap_live_slots,
7657 gc_stat_heap_sym_heap_free_slots,
7658 gc_stat_heap_sym_heap_final_slots,
7659 gc_stat_heap_sym_heap_eden_pages,
7660 gc_stat_heap_sym_heap_eden_slots,
7661 gc_stat_heap_sym_total_allocated_pages,
7662 gc_stat_heap_sym_force_major_gc_count,
7663 gc_stat_heap_sym_force_incremental_marking_finish_count,
7664 gc_stat_heap_sym_heap_allocatable_slots,
7665 gc_stat_heap_sym_total_allocated_objects,
7666 gc_stat_heap_sym_total_freed_objects,
7667 gc_stat_heap_sym_last
7668};
7669
7670static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
7671
7672static void
7673setup_gc_stat_heap_symbols(void)
7674{
7675 if (gc_stat_heap_symbols[0] == 0) {
7676#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
7677 S(slot_size);
7678 S(heap_live_slots);
7679 S(heap_free_slots);
7680 S(heap_final_slots);
7681 S(heap_eden_pages);
7682 S(heap_eden_slots);
7683 S(heap_allocatable_slots);
7684 S(total_allocated_pages);
7685 S(force_major_gc_count);
7686 S(force_incremental_marking_finish_count);
7687 S(total_allocated_objects);
7688 S(total_freed_objects);
7689#undef S
7690 }
7691}
7692
7693static VALUE
7694stat_one_heap(rb_objspace_t *objspace, rb_heap_t *heap, VALUE hash, VALUE key)
7695{
7696#define SET(name, attr) \
7697 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
7698 return SIZET2NUM(attr); \
7699 else if (hash != Qnil) \
7700 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
7701
7702 SET(slot_size, heap->slot_size);
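    /* Derived counters (a reading of the arithmetic below): live slots are
     * everything ever allocated minus everything freed or pending
     * finalization; free slots are total capacity minus still-occupied slots. */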
7703 SET(heap_live_slots, heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count);
7704 SET(heap_free_slots, heap->total_slots - (heap->total_allocated_objects - heap->total_freed_objects));
7705 SET(heap_final_slots, heap->final_slots_count);
7706 SET(heap_eden_pages, heap->total_pages);
7707 SET(heap_eden_slots, heap->total_slots);
7708 SET(heap_allocatable_slots, objspace->heap_pages.allocatable_bytes / heap->slot_size);
7709 SET(total_allocated_pages, heap->total_allocated_pages);
7710 SET(force_major_gc_count, heap->force_major_gc_count);
7711 SET(force_incremental_marking_finish_count, heap->force_incremental_marking_finish_count);
7712 SET(total_allocated_objects, heap->total_allocated_objects);
7713 SET(total_freed_objects, heap->total_freed_objects);
7714#undef SET
7715
7716 if (!NIL_P(key)) {
7717 // Matched key should return above
7718 return Qundef;
7719 }
7720
7721 return hash;
7722}
7723
7724VALUE
7725rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
7726{
7727 rb_objspace_t *objspace = objspace_ptr;
7728
7729 ractor_cache_flush_count(objspace, rb_gc_get_ractor_newobj_cache());
7730
7731 setup_gc_stat_heap_symbols();
7732
7733 if (NIL_P(heap_name)) {
7734 if (!RB_TYPE_P(hash_or_sym, T_HASH)) {
7735 rb_bug("non-hash given");
7736 }
7737
7738 for (int i = 0; i < HEAP_COUNT; i++) {
7739 VALUE hash = rb_hash_aref(hash_or_sym, INT2FIX(i));
7740 if (NIL_P(hash)) {
7741 hash = rb_hash_new();
7742 rb_hash_aset(hash_or_sym, INT2FIX(i), hash);
7743 }
7744
7745 stat_one_heap(objspace, &heaps[i], hash, Qnil);
7746 }
7747 }
7748 else if (FIXNUM_P(heap_name)) {
7749 int heap_idx = FIX2INT(heap_name);
7750
7751 if (heap_idx < 0 || heap_idx >= HEAP_COUNT) {
7752 rb_raise(rb_eArgError, "size pool index out of range");
7753 }
7754
7755 if (SYMBOL_P(hash_or_sym)) {
7756 return stat_one_heap(objspace, &heaps[heap_idx], Qnil, hash_or_sym);
7757 }
7758 else if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7759 return stat_one_heap(objspace, &heaps[heap_idx], hash_or_sym, Qnil);
7760 }
7761 else {
7762 rb_bug("non-hash or symbol given");
7763 }
7764 }
7765 else {
7766 rb_bug("heap_name must be nil or an Integer");
7767 }
7768
7769 return hash_or_sym;
7770}
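/* Ruby-level usage sketch (assuming the usual GC.stat_heap binding; values
 * are illustrative):
 *
 *     GC.stat_heap                 #=> {0 => {slot_size: 40, ...}, 1 => ...}
 *     GC.stat_heap(0, :slot_size)  #=> 40
 *
 * A nil heap_name dumps every heap into the given Hash keyed by heap index;
 * an Integer heap_name queries a single heap.
 */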
7771
7772/* I could include internal.h for this, but doing so undefines some Array macros
7773 * necessary for initialising objects, and I don't want to include all the array
7774 * headers to get them back
7775 * TODO: Investigate why RARRAY_AREF gets undefined in internal.h
7776 */
7777#ifndef RBOOL
7778#define RBOOL(v) (v ? Qtrue : Qfalse)
7779#endif
7780
7781VALUE
7782rb_gc_impl_config_get(void *objspace_ptr)
7783{
7784#define sym(name) ID2SYM(rb_intern_const(name))
7785 rb_objspace_t *objspace = objspace_ptr;
7786 VALUE hash = rb_hash_new();
7787
7788 rb_hash_aset(hash, sym("rgengc_allow_full_mark"), RBOOL(gc_config_full_mark_val));
7789
7790 return hash;
7791}
7792
7793static int
7794gc_config_set_key(VALUE key, VALUE value, VALUE data)
7795{
7796    rb_objspace_t *objspace = (rb_objspace_t *)data;
7797    if (rb_sym2id(key) == rb_intern("rgengc_allow_full_mark")) {
7798 gc_rest(objspace);
7799 gc_config_full_mark_set(RTEST(value));
7800 }
7801 return ST_CONTINUE;
7802}
7803
7804void
7805rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
7806{
7807 rb_objspace_t *objspace = objspace_ptr;
7808
7809 if (!RB_TYPE_P(hash, T_HASH)) {
7810 rb_raise(rb_eArgError, "expected keyword arguments");
7811 }
7812
7813 rb_hash_foreach(hash, gc_config_set_key, (st_data_t)objspace);
7814}
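/* Ruby-level usage sketch (assuming the usual GC.config binding; values are
 * illustrative):
 *
 *     GC.config                                 #=> {rgengc_allow_full_mark: true}
 *     GC.config(rgengc_allow_full_mark: false)  # degrade majors to minors
 */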
7815
7816VALUE
7817rb_gc_impl_stress_get(void *objspace_ptr)
7818{
7819 rb_objspace_t *objspace = objspace_ptr;
7820 return ruby_gc_stress_mode;
7821}
7822
7823void
7824rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
7825{
7826 rb_objspace_t *objspace = objspace_ptr;
7827
7828 objspace->flags.gc_stressful = RTEST(flag);
7829 objspace->gc_stress_mode = flag;
7830}
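/* Ruby-level usage sketch (assuming the usual GC.stress binding):
 *
 *     GC.stress = true    # collect at every opportunity
 *     GC.stress = false   # back to normal operation
 *
 * An Integer flag selects finer-grained stress modes; see
 * gc_stress_full_mark_after_malloc_p() further down for one of the bits.
 */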
7831
7832static int
7833get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
7834{
7835 const char *ptr = getenv(name);
7836 ssize_t val;
7837
7838 if (ptr != NULL && *ptr) {
7839 size_t unit = 0;
7840 char *end;
7841#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
7842 val = strtoll(ptr, &end, 0);
7843#else
7844 val = strtol(ptr, &end, 0);
7845#endif
7846 switch (*end) {
7847 case 'k': case 'K':
7848 unit = 1024;
7849 ++end;
7850 break;
7851 case 'm': case 'M':
7852 unit = 1024*1024;
7853 ++end;
7854 break;
7855 case 'g': case 'G':
7856 unit = 1024*1024*1024;
7857 ++end;
7858 break;
7859 }
7860 while (*end && isspace((unsigned char)*end)) end++;
7861 if (*end) {
7862 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7863 return 0;
7864 }
7865 if (unit > 0) {
7866 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
7867 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
7868 return 0;
7869 }
7870 val *= unit;
7871 }
7872 if (val > 0 && (size_t)val > lower_bound) {
7873 if (RTEST(ruby_verbose)) {
7874 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
7875 }
7876 *default_value = (size_t)val;
7877 return 1;
7878 }
7879 else {
7880 if (RTEST(ruby_verbose)) {
7881 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
7882 name, val, *default_value, lower_bound);
7883 }
7884 return 0;
7885 }
7886 }
7887 return 0;
7888}
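/* Parsing sketch (hypothetical environment values): "16M" is accepted as
 * 16 * 1024 * 1024; "16MB" is rejected because of the trailing 'B'; "-1M"
 * is rejected by the val > 0 check above; and a value not strictly greater
 * than lower_bound is likewise ignored with a verbose-mode warning. */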
7889
7890static int
7891get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
7892{
7893 const char *ptr = getenv(name);
7894 double val;
7895
7896 if (ptr != NULL && *ptr) {
7897 char *end;
7898 val = strtod(ptr, &end);
7899 if (!*ptr || *end) {
7900 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7901 return 0;
7902 }
7903
7904 if (accept_zero && val == 0.0) {
7905 goto accept;
7906 }
7907 else if (val <= lower_bound) {
7908 if (RTEST(ruby_verbose)) {
7909 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
7910 name, val, *default_value, lower_bound);
7911 }
7912 }
7913 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
7914 val > upper_bound) {
7915 if (RTEST(ruby_verbose)) {
7916 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
7917 name, val, *default_value, upper_bound);
7918 }
7919 }
7920 else {
7921 goto accept;
7922 }
7923 }
7924 return 0;
7925
7926 accept:
7927 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
7928 *default_value = val;
7929 return 1;
7930}
7931
7932/*
7933 * GC tuning environment variables
7934 *
7935 * * RUBY_GC_HEAP_FREE_SLOTS
7936 * - Prepare at least this many slots after GC.
7937 * - Allocate slots if there are not enough slots.
7938 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
7939 * - Allocate slots by this factor.
7940 * - (next slots number) = (current slots number) * (this factor)
7941 * * RUBY_GC_HEAP_GROWTH_MAX_BYTES (was RUBY_GC_HEAP_GROWTH_MAX_SLOTS)
7942 * - Allocation rate is limited to this number of bytes.
7943 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
7944 * - Allocate additional pages when the number of free slots is
7945 * lower than the value (total_slots * (this ratio)).
7946 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
7947 * - Allocate slots to satisfy this formula:
7948 * free_slots = total_slots * goal_ratio
7949 * - In other words, prepare (total_slots * goal_ratio) free slots.
7950 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
7951 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
7952 * - Allow pages to be freed when the number of free slots is
7953 * greater than the value (total_slots * (this ratio)).
7954 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
7955 * - Do full GC when the number of old objects is more than R * N
7956 * where R is this factor and
7957 * N is the number of old objects just after last full GC.
7958 *
7959 * * obsolete
7960 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
7961 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1) -> RUBY_GC_HEAP_INIT_BYTES
7962 *
7963 * * RUBY_GC_MALLOC_LIMIT
7964 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
7965 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7966 *
7967 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
7968 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
7969 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7970 */
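/* Shell-level example (values are illustrative only):
 *
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.2 RUBY_GC_MALLOC_LIMIT=64M ruby app.rb
 */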
7971
7972void
7973rb_gc_impl_set_params(void *objspace_ptr)
7974{
7975 rb_objspace_t *objspace = objspace_ptr;
7976 /* RUBY_GC_HEAP_FREE_SLOTS */
7977 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
7978 /* ok */
7979 }
7980
7981 get_envparam_size("RUBY_GC_HEAP_INIT_BYTES", &gc_params.heap_init_bytes, 0);
7982
7983 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
7984 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_BYTES", &gc_params.growth_max_bytes, 0);
7985 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
7986 0.0, 1.0, FALSE);
7987 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
7988 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
7989 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
7990 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
7991 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
7992 get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
7993
7994 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
7995 malloc_limit = gc_params.malloc_limit_min;
7996 }
7997 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
7998 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
7999 gc_params.malloc_limit_max = SIZE_MAX;
8000 }
8001 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
8002
8003#if RGENGC_ESTIMATE_OLDMALLOC
8004 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
8005 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
8006 }
8007 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
8008 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
8009#endif
8010}
8011
8012static inline size_t
8013objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
8014{
8015#ifdef HAVE_MALLOC_USABLE_SIZE
8016 if (!hint) {
8017 hint = malloc_usable_size(ptr);
8018 }
8019#endif
8020 return hint;
8021}
8022
8023enum memop_type {
8024 MEMOP_TYPE_MALLOC = 0,
8025 MEMOP_TYPE_FREE,
8026 MEMOP_TYPE_REALLOC
8027};
8028
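/* Saturating atomic decrement: clamp the subtrahend to the current value so
 * the counter never wraps below zero, retrying the CAS until it succeeds. */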
8029static inline void
8030atomic_sub_nounderflow(size_t *var, size_t sub)
8031{
8032 if (sub == 0) return;
8033
8034 while (1) {
8035 size_t val = *var;
8036 if (val < sub) sub = val;
8037 if (RUBY_ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
8038 }
8039}
8040
8041#define gc_stress_full_mark_after_malloc_p() \
8042 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
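/* When GC.stress is an Integer with this bit set, a stress GC triggered from
 * malloc is promoted to a full mark. Ruby-level sketch (the 1 << 2 bit
 * position is an assumption based on the enum ordering):
 *
 *     GC.stress = 1 << 2   # full mark after every malloc
 */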
8043
8044static void
8045objspace_malloc_gc_stress(rb_objspace_t *objspace)
8046{
8047 if (ruby_gc_stressful && ruby_native_thread_p()) {
8048 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
8049 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
8050
8051 if (gc_stress_full_mark_after_malloc_p()) {
8052 reason |= GPR_FLAG_FULL_MARK;
8053 }
8054 garbage_collect_with_gvl(objspace, reason);
8055 }
8056}
8057
8058static void
8059malloc_increase_commit(rb_objspace_t *objspace, size_t new_size, size_t old_size)
8060{
8061 if (new_size > old_size) {
8062 RUBY_ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
8063#if RGENGC_ESTIMATE_OLDMALLOC
8064 RUBY_ATOMIC_SIZE_ADD(objspace->malloc_counters.oldmalloc_increase, new_size - old_size);
8065#endif
8066 }
8067 else {
8068 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
8069#if RGENGC_ESTIMATE_OLDMALLOC
8070 atomic_sub_nounderflow(&objspace->malloc_counters.oldmalloc_increase, old_size - new_size);
8071#endif
8072 }
8073}
8074
8075#if USE_MALLOC_INCREASE_LOCAL
8076static void
8077malloc_increase_local_flush(rb_objspace_t *objspace)
8078{
8079 int delta = malloc_increase_local;
8080 if (delta == 0) return;
8081
8082 malloc_increase_local = 0;
8083 if (delta > 0) {
8084 malloc_increase_commit(objspace, (size_t)delta, 0);
8085 }
8086 else {
8087 malloc_increase_commit(objspace, 0, (size_t)(-delta));
8088 }
8089}
8090#else
8091static void
8092malloc_increase_local_flush(rb_objspace_t *objspace)
8093{
8094}
8095#endif
8096
8097static inline bool
8098objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type, bool gc_allowed)
8099{
8100 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
8101 mem,
8102 type == MEMOP_TYPE_MALLOC ? "malloc" :
8103 type == MEMOP_TYPE_FREE ? "free " :
8104 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
8105 new_size, old_size);
8106 return false;
8107}
8108
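/* Batch small allocation deltas in malloc_increase_local and publish them to
 * the shared atomic counters only once they exceed
 * GC_MALLOC_INCREASE_LOCAL_THRESHOLD in either direction, keeping atomic
 * traffic off the hot malloc path. */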
8109static bool
8110objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type, bool gc_allowed)
8111{
8112#if USE_MALLOC_INCREASE_LOCAL
8113 if (new_size < GC_MALLOC_INCREASE_LOCAL_THRESHOLD &&
8114 old_size < GC_MALLOC_INCREASE_LOCAL_THRESHOLD) {
8115 malloc_increase_local += (int)new_size - (int)old_size;
8116
8117 if (malloc_increase_local >= GC_MALLOC_INCREASE_LOCAL_THRESHOLD ||
8118 malloc_increase_local <= -GC_MALLOC_INCREASE_LOCAL_THRESHOLD) {
8119 malloc_increase_local_flush(objspace);
8120 }
8121 }
8122 else {
8123 malloc_increase_local_flush(objspace);
8124 malloc_increase_commit(objspace, new_size, old_size);
8125 }
8126#else
8127 malloc_increase_commit(objspace, new_size, old_size);
8128#endif
8129
8130 if (type == MEMOP_TYPE_MALLOC && gc_allowed) {
8131 retry:
8132 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
8133 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
8134 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
8135 goto retry;
8136 }
8137 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
8138 }
8139 }
8140
8141#if MALLOC_ALLOCATED_SIZE
8142 if (new_size >= old_size) {
8143 RUBY_ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
8144 }
8145 else {
8146 size_t dec_size = old_size - new_size;
8147
8148#if MALLOC_ALLOCATED_SIZE_CHECK
8149 size_t allocated_size = objspace->malloc_params.allocated_size;
8150 if (allocated_size < dec_size) {
8151 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
8152 }
8153#endif
8154 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
8155 }
8156
8157 switch (type) {
8158 case MEMOP_TYPE_MALLOC:
8159 RUBY_ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
8160 break;
8161 case MEMOP_TYPE_FREE:
8162 {
8163 size_t allocations = objspace->malloc_params.allocations;
8164 if (allocations > 0) {
8165 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
8166 }
8167#if MALLOC_ALLOCATED_SIZE_CHECK
8168 else {
8169 GC_ASSERT(objspace->malloc_params.allocations > 0);
8170 }
8171#endif
8172 }
8173 break;
8174 case MEMOP_TYPE_REALLOC: /* ignore */ break;
8175 }
8176#endif
8177 return true;
8178}
8179
8180#define objspace_malloc_increase(...) \
8181 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
8182 !malloc_increase_done; \
8183 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
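/* The degenerate for-loop runs the report hook (a disabled debug trace that
 * always returns false) as the initializer, executes the attached block
 * exactly once, and then does the real accounting in
 * objspace_malloc_increase_body() as the loop increment, e.g.:
 *
 *     objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE, true) {
 *         free(ptr);
 *     }
 *
 * so the bookkeeping happens right after the block body runs. */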
8184
8185struct malloc_obj_info { /* 4 words */
8186 size_t size;
8187};
8188
8189static inline size_t
8190objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
8191{
8192 if (size == 0) size = 1;
8193
8194#if CALC_EXACT_MALLOC_SIZE
8195 size += sizeof(struct malloc_obj_info);
8196#endif
8197
8198 return size;
8199}
8200
8201static bool
8202malloc_during_gc_p(rb_objspace_t *objspace)
8203{
8204 /* malloc is not allowed during GC when we're not using multiple ractors
8205 * (since ractors can run while another thread is sweeping) and when we
8206 * have the GVL (since if we don't have the GVL, we'll try to acquire the
8207 * GVL which will block and ensure the other thread finishes GC). */
8208 return during_gc && !dont_gc_val() && !rb_gc_multi_ractor_p() && ruby_thread_has_gvl_p();
8209}
8210
8211static inline void *
8212objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size, bool gc_allowed)
8213{
8214 size = objspace_malloc_size(objspace, mem, size);
8215 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC, gc_allowed) {}
8216
8217#if CALC_EXACT_MALLOC_SIZE
8218 {
8219 struct malloc_obj_info *info = mem;
8220 info->size = size;
8221 mem = info + 1;
8222 }
8223#endif
8224
8225 return mem;
8226}
8227
8228#if defined(__GNUC__) && RUBY_DEBUG
8229#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
8230#endif
8231
8232#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
8233# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
8234#endif
8235
8236#define GC_MEMERROR(...) \
8237 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : (void)0)
8238
8239#define TRY_WITH_GC(siz, expr) do { \
8240 const gc_profile_record_flag gpr = \
8241 GPR_FLAG_FULL_MARK | \
8242 GPR_FLAG_IMMEDIATE_MARK | \
8243 GPR_FLAG_IMMEDIATE_SWEEP | \
8244 GPR_FLAG_MALLOC; \
8245 objspace_malloc_gc_stress(objspace); \
8246 \
8247 if (RB_LIKELY((expr))) { \
8248 /* Success on 1st try */ \
8249 } \
8250 else if (gc_allowed && !garbage_collect_with_gvl(objspace, gpr)) { \
8251 /* @shyouhei thinks this doesn't happen */ \
8252 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
8253 } \
8254 else if ((expr)) { \
8255 /* Success on 2nd try */ \
8256 } \
8257 else { \
8258 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
8259 "%"PRIdSIZE" bytes for %s", \
8260 siz, # expr); \
8261 } \
8262 } while (0)
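/* Expansion sketch: TRY_WITH_GC(n, mem = malloc(n)) evaluates the expression
 * once; on failure it runs one full, immediate GC (when gc_allowed) and
 * retries the expression a single time. A second failure calls GC_MEMERROR,
 * which is rb_bug() on debug builds and a no-op otherwise, leaving the
 * caller to see the NULL result. */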
8263
8264static void
8265check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
8266{
8267 if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
8268 dont_gc_on();
8269 during_gc = false;
8270 rb_bug("Cannot %s during GC", msg);
8271 }
8272}
8273
8274void
8275rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
8276{
8277 rb_objspace_t *objspace = objspace_ptr;
8278
8279 if (!ptr) {
8280 /*
8281 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
8282     * its first version. We had better follow suit.
8283 */
8284 return;
8285 }
8286#if CALC_EXACT_MALLOC_SIZE
8287 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8288#if VERIFY_FREE_SIZE
8289 if (!info->size) {
8290 rb_bug("buffer %p has no recorded size. Was it allocated with ruby_mimalloc? If so it should be freed with ruby_mimfree", ptr);
8291 }
8292
8293 if (old_size && (old_size + sizeof(struct malloc_obj_info)) != info->size) {
8294 rb_bug("buffer %p freed with old_size=%zu, but was allocated with size=%zu", ptr, old_size, info->size - sizeof(struct malloc_obj_info));
8295 }
8296#endif
8297 ptr = info;
8298 old_size = info->size;
8299#endif
8300 old_size = objspace_malloc_size(objspace, ptr, old_size);
8301
8302 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE, true) {
8303 free(ptr);
8304 ptr = NULL;
8305 RB_DEBUG_COUNTER_INC(heap_xfree);
8306 }
8307}
8308
8309void *
8310rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
8311{
8312 rb_objspace_t *objspace = objspace_ptr;
8313 check_malloc_not_in_gc(objspace, "malloc");
8314
8315 void *mem;
8316
8317 size = objspace_malloc_prepare(objspace, size);
8318 TRY_WITH_GC(size, mem = malloc(size));
8319 RB_DEBUG_COUNTER_INC(heap_xmalloc);
8320 if (!mem) return mem;
8321 return objspace_malloc_fixup(objspace, mem, size, gc_allowed);
8322}
8323
8324void *
8325rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
8326{
8327 rb_objspace_t *objspace = objspace_ptr;
8328
8329 if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
8330 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
8331#if RGENGC_CHECK_MODE || RUBY_DEBUG
8332 rb_bug("Cannot calloc during GC");
8333#endif
8334 }
8335
8336 void *mem;
8337
8338 size = objspace_malloc_prepare(objspace, size);
8339 TRY_WITH_GC(size, mem = calloc1(size));
8340 if (!mem) return mem;
8341 return objspace_malloc_fixup(objspace, mem, size, gc_allowed);
8342}
8343
8344void *
8345rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
8346{
8347 rb_objspace_t *objspace = objspace_ptr;
8348
8349 check_malloc_not_in_gc(objspace, "realloc");
8350
8351 void *mem;
8352
8353 if (!ptr) return rb_gc_impl_malloc(objspace, new_size, gc_allowed);
8354
8355 /*
8356 * The behavior of realloc(ptr, 0) is implementation defined.
8357     * Therefore we don't use realloc(ptr, 0) for portability reasons.
8358 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
8359 */
8360 if (new_size == 0) {
8361 if ((mem = rb_gc_impl_malloc(objspace, 0, gc_allowed)) != NULL) {
8362 /*
8363 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
8364 * returns a non-NULL pointer to an access-protected memory page.
8365 * The returned pointer cannot be read / written at all, but
8366 * still be a valid argument of free().
8367 *
8368 * https://man.openbsd.org/malloc.3
8369 *
8370 * - Linux's malloc(3) man page says that it _might_ perhaps return
8371 * a non-NULL pointer when its argument is 0. That return value
8372 * is safe (and is expected) to be passed to free().
8373 *
8374 * https://man7.org/linux/man-pages/man3/malloc.3.html
8375 *
8376         * - As I read the implementation, jemalloc's malloc() returns a fully
8377         * normal 16-byte memory region when its argument is 0.
8378         *
8379         * - As I read the implementation, musl libc's malloc() returns a fully
8380         * normal 32-byte memory region when its argument is 0.
8381 *
8382 * - Other malloc implementations can also return non-NULL.
8383 */
8384 rb_gc_impl_free(objspace, ptr, old_size);
8385 return mem;
8386 }
8387 else {
8388 /*
8389 * It is dangerous to return NULL here, because that could lead to
8390         * RCE. Fall back to 1 byte instead of zero.
8391 *
8392 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
8393 */
8394 new_size = 1;
8395 }
8396 }
8397
8398#if CALC_EXACT_MALLOC_SIZE
8399 {
8400 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8401 new_size += sizeof(struct malloc_obj_info);
8402 ptr = info;
8403#if VERIFY_FREE_SIZE
8404 if (old_size && (old_size + sizeof(struct malloc_obj_info)) != info->size) {
8405 rb_bug("buffer %p realloced with old_size=%zu, but was allocated with size=%zu", ptr, old_size, info->size - sizeof(struct malloc_obj_info));
8406 }
8407#endif
8408 old_size = info->size;
8409 }
8410#endif
8411
8412 old_size = objspace_malloc_size(objspace, ptr, old_size);
8413 TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
8414 if (!mem) return mem;
8415 new_size = objspace_malloc_size(objspace, mem, new_size);
8416
8417#if CALC_EXACT_MALLOC_SIZE
8418 {
8419 struct malloc_obj_info *info = mem;
8420 info->size = new_size;
8421 mem = info + 1;
8422 }
8423#endif
8424
8425 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC, gc_allowed);
8426
8427 RB_DEBUG_COUNTER_INC(heap_xrealloc);
8428 return mem;
8429}
8430
8431void
8432rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)
8433{
8434 rb_objspace_t *objspace = objspace_ptr;
8435
8436 if (diff > 0) {
8437 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC, true);
8438 }
8439 else if (diff < 0) {
8440 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC, true);
8441 }
8442}
8443
8444// TODO: move GC profiler stuff back into gc.c
8445/*
8446 ------------------------------ GC profiler ------------------------------
8447*/
8448
8449#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
8450
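/* Best-effort per-process CPU time: prefer clock_gettime(CLOCK_PROCESS_CPUTIME_ID),
 * fall back to getrusage(RUSAGE_SELF) user time, then to GetProcessTimes() on
 * Windows; false is returned only when every source is unavailable. */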
8451static bool
8452current_process_time(struct timespec *ts)
8453{
8454#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
8455 {
8456 static int try_clock_gettime = 1;
8457 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
8458 return true;
8459 }
8460 else {
8461 try_clock_gettime = 0;
8462 }
8463 }
8464#endif
8465
8466#ifdef RUSAGE_SELF
8467 {
8468 struct rusage usage;
8469 struct timeval time;
8470 if (getrusage(RUSAGE_SELF, &usage) == 0) {
8471 time = usage.ru_utime;
8472 ts->tv_sec = time.tv_sec;
8473 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
8474 return true;
8475 }
8476 }
8477#endif
8478
8479#ifdef _WIN32
8480 {
8481 FILETIME creation_time, exit_time, kernel_time, user_time;
8482 ULARGE_INTEGER ui;
8483
8484 if (GetProcessTimes(GetCurrentProcess(),
8485 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8486 memcpy(&ui, &user_time, sizeof(FILETIME));
8487#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
8488 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
8489 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
8490 return true;
8491 }
8492 }
8493#endif
8494
8495 return false;
8496}
8497
8498static double
8499getrusage_time(void)
8500{
8501 struct timespec ts;
8502 if (current_process_time(&ts)) {
8503 return ts.tv_sec + ts.tv_nsec * 1e-9;
8504 }
8505 else {
8506 return 0.0;
8507 }
8508}
8509
8510
8511static inline void
8512gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
8513{
8514 if (objspace->profile.run) {
8515 size_t index = objspace->profile.next_index;
8516 gc_profile_record *record;
8517
8518 /* create new record */
8519 objspace->profile.next_index++;
8520
8521 if (!objspace->profile.records) {
8522 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
8523 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
8524 }
8525 if (index >= objspace->profile.size) {
8526 void *ptr;
8527 objspace->profile.size += 1000;
8528 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
8529 if (!ptr) rb_memerror();
8530 objspace->profile.records = ptr;
8531 }
8532 if (!objspace->profile.records) {
8533 rb_bug("gc_profile malloc or realloc miss");
8534 }
8535 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
8536 MEMZERO(record, gc_profile_record, 1);
8537
8538 /* setup before-GC parameter */
8539 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
8540#if MALLOC_ALLOCATED_SIZE
8541 record->allocated_size = malloc_allocated_size;
8542#endif
8543#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
8544#ifdef RUSAGE_SELF
8545 {
8546 struct rusage usage;
8547 if (getrusage(RUSAGE_SELF, &usage) == 0) {
8548 record->maxrss = usage.ru_maxrss;
8549 record->minflt = usage.ru_minflt;
8550 record->majflt = usage.ru_majflt;
8551 }
8552 }
8553#endif
8554#endif
8555 }
8556}
8557
8558static inline void
8559gc_prof_timer_start(rb_objspace_t *objspace)
8560{
8561 if (gc_prof_enabled(objspace)) {
8562 gc_profile_record *record = gc_prof_record(objspace);
8563#if GC_PROFILE_MORE_DETAIL
8564 record->prepare_time = objspace->profile.prepare_time;
8565#endif
8566 record->gc_time = 0;
8567 record->gc_invoke_time = getrusage_time();
8568 }
8569}
8570
8571static double
8572elapsed_time_from(double time)
8573{
8574 double now = getrusage_time();
8575 if (now > time) {
8576 return now - time;
8577 }
8578 else {
8579 return 0;
8580 }
8581}
8582
8583static inline void
8584gc_prof_timer_stop(rb_objspace_t *objspace)
8585{
8586 if (gc_prof_enabled(objspace)) {
8587 gc_profile_record *record = gc_prof_record(objspace);
8588 record->gc_time = elapsed_time_from(record->gc_invoke_time);
8589 record->gc_invoke_time -= objspace->profile.invoke_time;
8590 }
8591}
8592
8593#ifdef BUILDING_MODULAR_GC
8594# define RUBY_DTRACE_GC_HOOK(name)
8595#else
8596# define RUBY_DTRACE_GC_HOOK(name) \
8597 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
8598#endif
8599
8600static inline void
8601gc_prof_mark_timer_start(rb_objspace_t *objspace)
8602{
8603 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
8604#if GC_PROFILE_MORE_DETAIL
8605 if (gc_prof_enabled(objspace)) {
8606 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
8607 }
8608#endif
8609}
8610
8611static inline void
8612gc_prof_mark_timer_stop(rb_objspace_t *objspace)
8613{
8614 RUBY_DTRACE_GC_HOOK(MARK_END);
8615#if GC_PROFILE_MORE_DETAIL
8616 if (gc_prof_enabled(objspace)) {
8617 gc_profile_record *record = gc_prof_record(objspace);
8618 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
8619 }
8620#endif
8621}
8622
8623static inline void
8624gc_prof_sweep_timer_start(rb_objspace_t *objspace)
8625{
8626 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
8627 if (gc_prof_enabled(objspace)) {
8628 gc_profile_record *record = gc_prof_record(objspace);
8629
8630 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
8631 objspace->profile.gc_sweep_start_time = getrusage_time();
8632 }
8633 }
8634}
8635
8636static inline void
8637gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
8638{
8639 RUBY_DTRACE_GC_HOOK(SWEEP_END);
8640
8641 if (gc_prof_enabled(objspace)) {
8642 double sweep_time;
8643 gc_profile_record *record = gc_prof_record(objspace);
8644
8645 if (record->gc_time > 0) {
8646 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
8647 /* need to accumulate GC time for lazy sweep after gc() */
8648 record->gc_time += sweep_time;
8649 }
8650 else if (GC_PROFILE_MORE_DETAIL) {
8651 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
8652 }
8653
8654#if GC_PROFILE_MORE_DETAIL
8655 record->gc_sweep_time += sweep_time;
8656 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
8657#endif
8658 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
8659 }
8660}
8661
8662static inline void
8663gc_prof_set_malloc_info(rb_objspace_t *objspace)
8664{
8665#if GC_PROFILE_MORE_DETAIL
8666 if (gc_prof_enabled(objspace)) {
8667 gc_profile_record *record = gc_prof_record(objspace);
8668 record->allocate_increase = malloc_increase;
8669 record->allocate_limit = malloc_limit;
8670 }
8671#endif
8672}
8673
8674static inline void
8675gc_prof_set_heap_info(rb_objspace_t *objspace)
8676{
8677 if (gc_prof_enabled(objspace)) {
8678 gc_profile_record *record = gc_prof_record(objspace);
8679
8680 /* Sum across all size pools since each has a different slot size. */
8681 size_t total = 0;
8682 size_t use_size = 0;
8683 size_t total_size = 0;
8684 for (int i = 0; i < HEAP_COUNT; i++) {
8685 rb_heap_t *heap = &heaps[i];
8686 size_t heap_live = heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count;
8687 total += heap->total_slots;
8688 use_size += heap_live * heap->slot_size;
8689 total_size += heap->total_slots * heap->slot_size;
8690 }
8691
8692#if GC_PROFILE_MORE_DETAIL
8693 size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
8694 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
8695 record->heap_live_objects = live;
8696 record->heap_free_objects = total - live;
8697#endif
8698
8699 record->heap_total_objects = total;
8700 record->heap_use_size = use_size;
8701 record->heap_total_size = total_size;
8702 }
8703}
8704
8705/*
8706 * call-seq:
8707 * GC::Profiler.clear -> nil
8708 *
8709 * Clears the \GC profiler data.
8710 *
8711 */
8712
8713static VALUE
8714gc_profile_clear(VALUE _)
8715{
8716 rb_objspace_t *objspace = rb_gc_get_objspace();
8717 void *p = objspace->profile.records;
8718 objspace->profile.records = NULL;
8719 objspace->profile.size = 0;
8720 objspace->profile.next_index = 0;
8721 objspace->profile.current_record = 0;
8722 free(p);
8723 return Qnil;
8724}
8725
8726/*
8727 * call-seq:
8728 * GC::Profiler.raw_data -> [Hash, ...]
8729 *
8730 * Returns an Array of individual raw profile data Hashes ordered
8731 * from earliest to latest by +:GC_INVOKE_TIME+.
8732 *
8733 * For example:
8734 *
8735 * [
8736 * {
8737 * :GC_TIME=>1.3000000000000858e-05,
8738 * :GC_INVOKE_TIME=>0.010634999999999999,
8739 * :HEAP_USE_SIZE=>289640,
8740 * :HEAP_TOTAL_SIZE=>588960,
8741 * :HEAP_TOTAL_OBJECTS=>14724,
8742 * :GC_IS_MARKED=>false
8743 * },
8744 * # ...
8745 * ]
8746 *
8747 * The keys mean:
8748 *
8749 * +:GC_TIME+::
8750 * Time elapsed in seconds for this GC run
8751 * +:GC_INVOKE_TIME+::
8752 * Time elapsed in seconds from startup to when the GC was invoked
8753 * +:HEAP_USE_SIZE+::
8754 * Total bytes of heap used
8755 * +:HEAP_TOTAL_SIZE+::
8756 * Total size of heap in bytes
8757 * +:HEAP_TOTAL_OBJECTS+::
8758 * Total number of objects
8759 * +:GC_IS_MARKED+::
8760 * Returns +true+ if the GC is in mark phase
8761 *
8762 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
8763 * to the following hash keys:
8764 *
8765 * +:GC_MARK_TIME+::
8766 * +:GC_SWEEP_TIME+::
8767 * +:ALLOCATE_INCREASE+::
8768 * +:ALLOCATE_LIMIT+::
8769 * +:HEAP_USE_PAGES+::
8770 * +:HEAP_LIVE_OBJECTS+::
8771 * +:HEAP_FREE_OBJECTS+::
8772 * +:HAVE_FINALIZE+::
8773 *
8774 */
8775
8776static VALUE
8777gc_profile_record_get(VALUE _)
8778{
8779 VALUE prof;
8780 VALUE gc_profile = rb_ary_new();
8781 size_t i;
8782 rb_objspace_t *objspace = rb_gc_get_objspace();
8783
8784 if (!objspace->profile.run) {
8785 return Qnil;
8786 }
8787
8788    for (i = 0; i < objspace->profile.next_index; i++) {
8789 gc_profile_record *record = &objspace->profile.records[i];
8790
8791 prof = rb_hash_new();
8792 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
8793 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
8794 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
8795 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
8796 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
8797 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
8798 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
8799 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
8800#if GC_PROFILE_MORE_DETAIL
8801 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
8802 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
8803 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
8804 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
8805 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
8806 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
8807 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
8808
8809 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
8810 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
8811
8812 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
8813#endif
8814
8815#if RGENGC_PROFILE > 0
8816 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
8817 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
8818 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
8819#endif
8820 rb_ary_push(gc_profile, prof);
8821 }
8822
8823 return gc_profile;
8824}
8825
8826#if GC_PROFILE_MORE_DETAIL
8827#define MAJOR_REASON_MAX 0x10
8828
8829static char *
8830gc_profile_dump_major_reason(unsigned int flags, char *buff)
8831{
8832 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
8833 int i = 0;
8834
8835 if (reason == GPR_FLAG_NONE) {
8836 buff[0] = '-';
8837 buff[1] = 0;
8838 }
8839 else {
8840#define C(x, s) \
8841 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
8842 buff[i++] = #x[0]; \
8843 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
8844 buff[i] = 0; \
8845 }
8846 C(NOFREE, N);
8847 C(OLDGEN, O);
8848 C(SHADY, S);
8849#if RGENGC_ESTIMATE_OLDMALLOC
8850 C(OLDMALLOC, M);
8851#endif
8852#undef C
8853 }
8854 return buff;
8855}
8856#endif
8857
8858
8859
8860static void
8861gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
8862{
8863 rb_objspace_t *objspace = rb_gc_get_objspace();
8864 size_t count = objspace->profile.next_index;
8865#ifdef MAJOR_REASON_MAX
8866 char reason_str[MAJOR_REASON_MAX];
8867#endif
8868
8869 if (objspace->profile.run && count /* > 1 */) {
8870 size_t i;
8871 const gc_profile_record *record;
8872
8873 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
8874 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
8875
8876 for (i = 0; i < count; i++) {
8877 record = &objspace->profile.records[i];
8878 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
8879 i+1, record->gc_invoke_time, record->heap_use_size,
8880 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
8881 }
8882
8883#if GC_PROFILE_MORE_DETAIL
8884 const char *str = "\n\n" \
8885 "More detail.\n" \
8886 "Prepare Time = Previously GC's rest sweep time\n"
8887 "Index Flags Allocate Inc. Allocate Limit"
8888#if CALC_EXACT_MALLOC_SIZE
8889 " Allocated Size"
8890#endif
8891 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
8892#if RGENGC_PROFILE
8893 " OldgenObj RemNormObj RemShadObj"
8894#endif
8895#if GC_PROFILE_DETAIL_MEMORY
8896 " MaxRSS(KB) MinorFLT MajorFLT"
8897#endif
8898 "\n";
8899 append(out, rb_str_new_cstr(str));
8900
8901 for (i = 0; i < count; i++) {
8902 record = &objspace->profile.records[i];
8903 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
8904#if CALC_EXACT_MALLOC_SIZE
8905 " %15"PRIuSIZE
8906#endif
8907 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
8908#if RGENGC_PROFILE
8909 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
8910#endif
8911#if GC_PROFILE_DETAIL_MEMORY
8912 "%11ld %8ld %8ld"
8913#endif
8914
8915 "\n",
8916 i+1,
8917 gc_profile_dump_major_reason(record->flags, reason_str),
8918 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
8919 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
8920 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
8921 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
8922 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
8923 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
8924 record->allocate_increase, record->allocate_limit,
8925#if CALC_EXACT_MALLOC_SIZE
8926 record->allocated_size,
8927#endif
8928 record->heap_use_pages,
8929 record->gc_mark_time*1000,
8930 record->gc_sweep_time*1000,
8931 record->prepare_time*1000,
8932
8933 record->heap_live_objects,
8934 record->heap_free_objects,
8935 record->removing_objects,
8936 record->empty_objects
8937#if RGENGC_PROFILE
8938 ,
8939 record->old_objects,
8940 record->remembered_normal_objects,
8941 record->remembered_shady_objects
8942#endif
8943#if GC_PROFILE_DETAIL_MEMORY
8944 ,
8945 record->maxrss / 1024,
8946 record->minflt,
8947 record->majflt
8948#endif
8949
8950 ));
8951 }
8952#endif
8953 }
8954}
8955
8956/*
8957 * call-seq:
8958 * GC::Profiler.result -> String
8959 *
8960 * Returns a profile data report such as:
8961 *
8962 * GC 1 invokes.
8963 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)
8964 * 1 0.012 159240 212940 10647 0.00000000000001530000
8965 */
8966
8967static VALUE
8968gc_profile_result(VALUE _)
8969{
8970 VALUE str = rb_str_buf_new(0);
8971 gc_profile_dump_on(str, rb_str_buf_append);
8972 return str;
8973}
8974
8975/*
8976 * call-seq:
8977 * GC::Profiler.report
8978 * GC::Profiler.report(io)
8979 *
8980 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
8981 *
8982 */
8983
8984static VALUE
8985gc_profile_report(int argc, VALUE *argv, VALUE self)
8986{
8987 VALUE out;
8988
8989 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
8990 gc_profile_dump_on(out, rb_io_write);
8991
8992 return Qnil;
8993}
8994
8995/*
8996 * call-seq:
8997 * GC::Profiler.total_time -> float
8998 *
8999 * The total time used for garbage collection in seconds
9000 */
9001
9002static VALUE
9003gc_profile_total_time(VALUE self)
9004{
9005 double time = 0;
9006 rb_objspace_t *objspace = rb_gc_get_objspace();
9007
9008 if (objspace->profile.run && objspace->profile.next_index > 0) {
9009 size_t i;
9010 size_t count = objspace->profile.next_index;
9011
9012 for (i = 0; i < count; i++) {
9013 time += objspace->profile.records[i].gc_time;
9014 }
9015 }
9016 return DBL2NUM(time);
9017}
9018
9019/*
9020 * call-seq:
9021 * GC::Profiler.enabled? -> true or false
9022 *
9023 * The current status of \GC profile mode.
9024 */
9025
9026static VALUE
9027gc_profile_enable_get(VALUE self)
9028{
9029 rb_objspace_t *objspace = rb_gc_get_objspace();
9030 return objspace->profile.run ? Qtrue : Qfalse;
9031}
9032
9033/*
9034 * call-seq:
9035 * GC::Profiler.enable -> nil
9036 *
9037 * Starts the \GC profiler.
9038 *
9039 */
9040
9041static VALUE
9042gc_profile_enable(VALUE _)
9043{
9044 rb_objspace_t *objspace = rb_gc_get_objspace();
9045 objspace->profile.run = TRUE;
9046 objspace->profile.current_record = 0;
9047 return Qnil;
9048}
9049
9050/*
9051 * call-seq:
9052 * GC::Profiler.disable -> nil
9053 *
9054 * Stops the \GC profiler.
9055 *
9056 */
9057
9058static VALUE
9059gc_profile_disable(VALUE _)
9060{
9061 rb_objspace_t *objspace = rb_gc_get_objspace();
9062
9063 objspace->profile.run = FALSE;
9064 objspace->profile.current_record = 0;
9065 return Qnil;
9066}
9067
9068void
9069rb_gc_verify_internal_consistency(void)
9070{
9071 gc_verify_internal_consistency(rb_gc_get_objspace());
9072}
9073
9074/*
9075 * call-seq:
9076 * GC.verify_internal_consistency -> nil
9077 *
9078 * Verify internal consistency.
9079 *
9080 * This method is implementation specific.
9081 * Now this method checks generational consistency
9082 * if RGenGC is supported.
9083 */
9084static VALUE
9085gc_verify_internal_consistency_m(VALUE dummy)
9086{
9087 rb_gc_verify_internal_consistency();
9088 return Qnil;
9089}
9090
9091#if GC_CAN_COMPILE_COMPACTION
9092/*
9093 * call-seq:
9094 * GC.auto_compact = flag
9095 *
9096 * Updates automatic compaction mode.
9097 *
9098 * When enabled, the compactor will execute on every major collection.
9099 *
9100 * Enabling compaction will degrade performance on major collections.
9101 */
9102static VALUE
9103gc_set_auto_compact(VALUE _, VALUE v)
9104{
9105 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9106
9107 ruby_enable_autocompact = RTEST(v);
9108
9109#if RGENGC_CHECK_MODE
9110 ruby_autocompact_compare_func = NULL;
9111
9112 if (SYMBOL_P(v)) {
9113 ID id = RB_SYM2ID(v);
9114 if (id == rb_intern("empty")) {
9115 ruby_autocompact_compare_func = compare_free_slots;
9116 }
9117 }
9118#endif
9119
9120 return v;
9121}
9122#else
9123# define gc_set_auto_compact rb_f_notimplement
9124#endif
9125
9126#if GC_CAN_COMPILE_COMPACTION
9127/*
9128 * call-seq:
9129 * GC.auto_compact -> true or false
9130 *
9131 * Returns whether or not automatic compaction has been enabled.
9132 */
9133static VALUE
9134gc_get_auto_compact(VALUE _)
9135{
9136 return ruby_enable_autocompact ? Qtrue : Qfalse;
9137}
9138#else
9139# define gc_get_auto_compact rb_f_notimplement
9140#endif
9141
9142#if GC_CAN_COMPILE_COMPACTION
9143/*
9144 * call-seq:
9145 * GC.latest_compact_info -> hash
9146 *
9147 * Returns information about objects moved in the most recent \GC compaction.
9148 *
9149 * The returned +hash+ contains the following keys:
9150 *
9151 * [considered]
9152 * Hash containing the type of the object as the key and the number of
9153 * objects of that type that were considered for movement.
9154 * [moved]
9155 * Hash containing the type of the object as the key and the number of
9156 * objects of that type that were actually moved.
9157 * [moved_up]
9158 * Hash containing the type of the object as the key and the number of
9159 * objects of that type that were increased in size.
9160 * [moved_down]
9161 * Hash containing the type of the object as the key and the number of
9162 * objects of that type that were decreased in size.
9163 *
9164 * Some objects can't be moved (due to pinning) so these numbers can be used to
9165 * calculate compaction efficiency.
9166 */
9167static VALUE
9168gc_compact_stats(VALUE self)
9169{
9170 rb_objspace_t *objspace = rb_gc_get_objspace();
9171 VALUE h = rb_hash_new();
9172 VALUE considered = rb_hash_new();
9173 VALUE moved = rb_hash_new();
9174 VALUE moved_up = rb_hash_new();
9175 VALUE moved_down = rb_hash_new();
9176
9177 for (size_t i = 0; i < T_MASK; i++) {
9178 if (objspace->rcompactor.considered_count_table[i]) {
9179 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
9180 }
9181
9182 if (objspace->rcompactor.moved_count_table[i]) {
9183 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
9184 }
9185
9186 if (objspace->rcompactor.moved_up_count_table[i]) {
9187 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
9188 }
9189
9190 if (objspace->rcompactor.moved_down_count_table[i]) {
9191 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
9192 }
9193 }
9194
9195 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
9196 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
9197 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
9198 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
9199
9200 return h;
9201}
9202#else
9203# define gc_compact_stats rb_f_notimplement
9204#endif
9205
9206#if GC_CAN_COMPILE_COMPACTION
9207/*
9208 * call-seq:
9209 * GC.compact -> hash
9210 *
9211 * This function compacts objects together in Ruby's heap. It eliminates
9212 * unused space (or fragmentation) in the heap by moving objects into that
9213 * unused space.
9214 *
9215 * The returned +hash+ contains statistics about the objects that were moved;
9216 * see GC.latest_compact_info.
9217 *
9218 * This method is only expected to work on CRuby.
9219 *
9220 * To test whether \GC compaction is supported, use the idiom:
9221 *
9222 * GC.respond_to?(:compact)
9223 */
9224static VALUE
9225gc_compact(VALUE self)
9226{
9227 rb_objspace_t *objspace = rb_gc_get_objspace();
9228 int full_marking_p = gc_config_full_mark_val;
9229 gc_config_full_mark_set(TRUE);
9230
9231 /* Run GC with compaction enabled */
9232 rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
9233 gc_config_full_mark_set(full_marking_p);
9234
9235 return gc_compact_stats(self);
9236}
9237#else
9238# define gc_compact rb_f_notimplement
9239#endif
9240
9241#if GC_CAN_COMPILE_COMPACTION
9242struct desired_compaction_pages_i_data {
9243    rb_objspace_t *objspace;
9244    size_t required_slots[HEAP_COUNT];
9245};
9246
9247static int
9248desired_compaction_pages_i(struct heap_page *page, void *data)
9249{
9250 struct desired_compaction_pages_i_data *tdata = data;
9251 rb_objspace_t *objspace = tdata->objspace;
9252 VALUE vstart = (VALUE)page->start;
9253 VALUE vend = vstart + (VALUE)(page->total_slots * page->heap->slot_size);
9254
9255
9256 for (VALUE v = vstart; v != vend; v += page->heap->slot_size) {
9257 asan_unpoisoning_object(v) {
9258 /* skip T_NONEs; they won't be moved */
9259 if (BUILTIN_TYPE(v) != T_NONE) {
9260 rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, page->heap, v);
9261 size_t dest_pool_idx = dest_pool - heaps;
9262 tdata->required_slots[dest_pool_idx]++;
9263 }
9264 }
9265 }
9266
9267 return 0;
9268}
9269
9270/* call-seq:
9271 * GC.verify_compaction_references(toward: nil, double_heap: false) -> hash
9272 *
9273 * Verify compaction reference consistency.
9274 *
9275 * This method is implementation specific. During compaction, objects that
9276 * were moved are replaced with T_MOVED objects. No object should have a
9277 * reference to a T_MOVED object after compaction.
9278 *
9279 * This function expands the heap to ensure room to move all objects,
9280 * compacts the heap to make sure everything moves, updates all references,
9281 * then performs a full \GC. If any object contains a reference to a T_MOVED
9282 * object, that object should be pushed onto the mark stack, and will
9283 * trigger a SEGV.
9284 */
9285static VALUE
9286gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
9287{
9288 static ID keywords[3] = {0};
9289 if (!keywords[0]) {
9290 keywords[0] = rb_intern("toward");
9291 keywords[1] = rb_intern("double_heap");
9292 keywords[2] = rb_intern("expand_heap");
9293 }
9294
9295 VALUE options;
9296 rb_scan_args_kw(rb_keyword_given_p(), argc, argv, ":", &options);
9297
9298 VALUE arguments[3] = { Qnil, Qfalse, Qfalse };
9299 int kwarg_count = rb_get_kwargs(options, keywords, 0, 3, arguments);
9300 bool toward_empty = kwarg_count > 0 && SYMBOL_P(arguments[0]) && SYM2ID(arguments[0]) == rb_intern("empty");
9301 bool expand_heap = (kwarg_count > 1 && RTEST(arguments[1])) || (kwarg_count > 2 && RTEST(arguments[2]));
9302
9303 rb_objspace_t *objspace = rb_gc_get_objspace();
9304
9305 /* Clear the heap. */
9306 rb_gc_impl_start(objspace, true, true, true, false);
9307
9308 unsigned int lev = RB_GC_VM_LOCK();
9309 {
9310 gc_rest(objspace);
9311
9312 /* if both double_heap and expand_heap are set, expand_heap takes precedence */
9313 if (expand_heap) {
9314 struct desired_compaction_pages_i_data desired_compaction = {
9315 .objspace = objspace,
9316 .required_slots = {0},
9317 };
9318 /* Work out how many objects want to be in each size pool, taking account of moves */
9319 objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
9320
9321 /* Find out which pool has the most pages */
9322 size_t max_existing_pages = 0;
9323 for (int i = 0; i < HEAP_COUNT; i++) {
9324 rb_heap_t *heap = &heaps[i];
9325 max_existing_pages = MAX(max_existing_pages, heap->total_pages);
9326 }
9327
9328 /* Add pages to each size pool so that compaction is guaranteed to move every object */
9329 for (int i = 0; i < HEAP_COUNT; i++) {
9330 rb_heap_t *heap = &heaps[i];
9331
9332 size_t pages_to_add = 0;
9333 /*
9334 * Step 1: Make sure every pool has the same number of pages, by adding empty pages
9335 * to smaller pools. This is required to make sure the compact cursor can advance
9336 * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
9337 * compact cursors met" condition on some pools before fully compacting others
9338 */
9339 pages_to_add += max_existing_pages - heap->total_pages;
9340 /*
9341 * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
9342 * that want to be in that size pool, whether moved into it or moved within it
9343 */
9344 objspace->heap_pages.allocatable_bytes = desired_compaction.required_slots[i] * heap->slot_size;
9345 while (objspace->heap_pages.allocatable_bytes > 0) {
9346 heap_page_allocate_and_initialize(objspace, heap);
9347 }
9348 /*
9349 * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
9350 * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
9351 */
9352 pages_to_add += 2;
9353
9354 for (; pages_to_add > 0; pages_to_add--) {
9355 heap_page_allocate_and_initialize_force(objspace, heap);
9356 }
9357 }
9358 }
9359
9360 if (toward_empty) {
9361 objspace->rcompactor.compare_func = compare_free_slots;
9362 }
9363 }
9364 RB_GC_VM_UNLOCK(lev);
9365
9366 rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
9367
9368 rb_objspace_reachable_objects_from_root(root_obj_check_moved_i, objspace);
9369 objspace_each_objects(objspace, heap_check_moved_i, objspace, TRUE);
9370
9371 objspace->rcompactor.compare_func = NULL;
9372
9373 return gc_compact_stats(self);
9374}
9375#else
9376# define gc_verify_compaction_references rb_f_notimplement
9377#endif
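/* Editorial note: a simplified, hypothetical restatement of the three-step
 * page budget computed above. The function name and parameters are
 * illustrative; the real code drives Step 2 through
 * heap_pages.allocatable_bytes rather than a page count, and it is folded
 * into one expression here for illustration only. */
#if 0
static size_t
verify_compaction_pages_to_add(size_t max_existing_pages, size_t total_pages,
                               size_t required_slots, size_t slots_per_page)
{
    /* Step 1: equalize pools so the compact cursor can sweep them in step */
    size_t pages = max_existing_pages - total_pages;
    /* Step 2: room for every object that wants to land in this pool (ceil) */
    pages += (required_slots + slots_per_page - 1) / slots_per_page;
    /* Step 3: two spare pages so the cursors meet after the final move */
    return pages + 2;
}
#endif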
9378
9379void
9380rb_gc_impl_objspace_free(void *objspace_ptr)
9381{
9382 rb_objspace_t *objspace = objspace_ptr;
9383
9384 if (is_lazy_sweeping(objspace))
9385 rb_bug("lazy sweeping underway when freeing object space");
9386
9387 free(objspace->profile.records);
9388 objspace->profile.records = NULL;
9389
9390 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
9391 heap_page_free(objspace, rb_darray_get(objspace->heap_pages.sorted, i));
9392 }
9393 rb_darray_free_without_gc(objspace->heap_pages.sorted);
9394 heap_pages_lomem = 0;
9395 heap_pages_himem = 0;
9396
9397 for (int i = 0; i < HEAP_COUNT; i++) {
9398 rb_heap_t *heap = &heaps[i];
9399 heap->total_pages = 0;
9400 heap->total_slots = 0;
9401 }
9402
9403 free_stack_chunks(&objspace->mark_stack);
9404 mark_stack_free_cache(&objspace->mark_stack);
9405
9406 rb_darray_free_without_gc(objspace->weak_references);
9407
9408 free(objspace);
9409}
9410
9411#if MALLOC_ALLOCATED_SIZE
9412/*
9413 * call-seq:
9414 * GC.malloc_allocated_size -> Integer
9415 *
9416 * Returns the size of memory allocated by malloc().
9417 *
9418 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
9419 */
9420
9421static VALUE
9422gc_malloc_allocated_size(VALUE self)
9423{
9424 rb_objspace_t *objspace = (rb_objspace_t *)rb_gc_get_objspace();
9425 return ULL2NUM(objspace->malloc_params.allocated_size);
9426}
9427
9428/*
9429 * call-seq:
9430 * GC.malloc_allocations -> Integer
9431 *
9432 * Returns the number of malloc() allocations.
9433 *
9434 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
9435 */
9436
9437static VALUE
9438gc_malloc_allocations(VALUE self)
9439{
9440 rb_objspace_t *objspace = (rb_objspace_t *)rb_gc_get_objspace();
9441 return ULL2NUM(objspace->malloc_params.allocations);
9442}
9443#endif
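/* Editorial note: a hypothetical C-side probe for the two counters above,
 * meaningful only in builds where MALLOC_ALLOCATED_SIZE is enabled; the
 * function name is illustrative. */
#if 0
static void
dump_malloc_stats(void)
{
    VALUE bytes  = rb_funcall(rb_mGC, rb_intern("malloc_allocated_size"), 0);
    VALUE allocs = rb_funcall(rb_mGC, rb_intern("malloc_allocations"), 0);
    rb_p(rb_ary_new_from_args(2, bytes, allocs)); /* print both while debugging */
}
#endif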
9444
9445void
9446rb_gc_impl_before_fork(void *objspace_ptr)
9447{
9448 rb_objspace_t *objspace = objspace_ptr;
9449
9450 objspace->fork_vm_lock_lev = RB_GC_VM_LOCK();
9451 rb_gc_vm_barrier();
9452}
9453
9454void
9455rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
9456{
9457 rb_objspace_t *objspace = objspace_ptr;
9458
9459 RB_GC_VM_UNLOCK(objspace->fork_vm_lock_lev);
9460 objspace->fork_vm_lock_lev = 0;
9461
9462 if (pid == 0) { /* child process */
9463 rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
9464 }
9465}
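/* Editorial note: a hypothetical sketch of the call pattern the two hooks
 * above are designed for, assuming a POSIX fork(2); the VM, not this file,
 * wires these calls up, and the wrapper name is illustrative. */
#if 0
static rb_pid_t
fork_with_gc_hooks(rb_objspace_t *objspace)
{
    rb_gc_impl_before_fork(objspace);     /* take the VM lock, barrier ractors */
    rb_pid_t pid = fork();
    rb_gc_impl_after_fork(objspace, pid); /* unlock; the child (pid == 0) also
                                           * clears per-ractor newobj caches */
    return pid;
}
#endif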
9466
9467VALUE rb_ident_hash_new_with_size(st_index_t size);
9468
9469#if GC_DEBUG_STRESS_TO_CLASS
9470/*
9471 * call-seq:
9472 * GC.add_stress_to_class(class[, ...])
9473 *
9474 * Raises NoMemoryError when allocating an instance of the given classes.
9475 *
9476 */
9477static VALUE
9478rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
9479{
9480 rb_objspace_t *objspace = rb_gc_get_objspace();
9481
9482 if (!stress_to_class) {
9483 set_stress_to_class(rb_ident_hash_new_with_size(argc));
9484 }
9485
9486 for (int i = 0; i < argc; i++) {
9487 VALUE klass = argv[i];
9488 rb_hash_aset(stress_to_class, klass, Qtrue);
9489 }
9490
9491 return self;
9492}
9493
9494/*
9495 * call-seq:
9496 * GC.remove_stress_to_class(class[, ...])
9497 *
9498 * No longer raises NoMemoryError when allocating an instance of the
9499 * given classes.
9500 *
9501 */
9502static VALUE
9503rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
9504{
9505 rb_objspace_t *objspace = rb_gc_get_objspace();
9506
9507 if (stress_to_class) {
9508 for (int i = 0; i < argc; ++i) {
9509 rb_hash_delete(stress_to_class, argv[i]);
9510 }
9511
9512 if (rb_hash_size(stress_to_class) == 0) {
9513 stress_to_class = 0;
9514 }
9515 }
9516
9517 return Qnil;
9518}
9519#endif
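/* Editorial note: a hypothetical debugging sketch for the two hooks above;
 * it only works in builds where GC_DEBUG_STRESS_TO_CLASS is enabled, and the
 * function name is illustrative. */
#if 0
static void
stress_string_allocation(void)
{
    /* Every String allocation now raises NoMemoryError ... */
    rb_funcall(rb_mGC, rb_intern("add_stress_to_class"), 1, rb_cString);
    /* ... exercise the NoMemoryError handling under test here ... */
    rb_funcall(rb_mGC, rb_intern("remove_stress_to_class"), 1, rb_cString);
}
#endif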
9520
9521void *
9522rb_gc_impl_objspace_alloc(void)
9523{
9524 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
9525
9526 return objspace;
9527}
9528
9529void
9530rb_gc_impl_objspace_init(void *objspace_ptr)
9531{
9532 rb_objspace_t *objspace = objspace_ptr;
9533
9534 gc_config_full_mark_set(TRUE);
9535
9536 objspace->flags.measure_gc = true;
9537 malloc_limit = gc_params.malloc_limit_min;
9538 objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
9539 if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
9540 rb_bug("Could not preregister postponed job for GC");
9541 }
9542
9543 /* A standard RVALUE (RBasic + embedded VALUEs + debug overhead) must fit
9544 * in at least one pool. In debug builds RVALUE_OVERHEAD can push this
9545 * beyond the 48-byte pool into the 64-byte pool, which is fine. */
9546 GC_ASSERT(rb_gc_impl_size_allocatable_p(sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX])));
9547
9548 for (int i = 0; i < HEAP_COUNT; i++) {
9549 rb_heap_t *heap = &heaps[i];
9550
9551 heap->slot_size = pool_slot_sizes[i];
9552
9553 ccan_list_head_init(&heap->pages);
9554 }
9555
9556 init_size_to_heap_idx();
9557
9558 rb_darray_make_without_gc(&objspace->heap_pages.sorted, 0);
9559 rb_darray_make_without_gc(&objspace->weak_references, 0);
9560
9561#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
9562 /* Need to determine if we can use mmap at runtime. */
9563 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
9564#endif
9565#if RGENGC_ESTIMATE_OLDMALLOC
9566 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9567#endif
9568 gc_params.heap_init_bytes = GC_HEAP_INIT_BYTES;
9569
9570 init_mark_stack(&objspace->mark_stack);
9571
9572 objspace->profile.invoke_time = getrusage_time();
9573 finalizer_table = st_init_numtable();
9574}
9575
9576void
9577rb_gc_impl_init(void)
9578{
9579 VALUE gc_constants = rb_hash_new();
9580 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), GC_DEBUG ? Qtrue : Qfalse);
9581 /* Minimum slot size that fits a standard RVALUE */
9582 size_t rvalue_pool = 0;
9583 for (size_t i = 0; i < HEAP_COUNT; i++) {
9584 if (pool_slot_sizes[i] >= RVALUE_SLOT_SIZE) { rvalue_pool = pool_slot_sizes[i]; break; }
9585 }
9586 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(rvalue_pool - RVALUE_OVERHEAD));
9587 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
9588 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
9589 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
9590 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
9591 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(HEAP_COUNT));
9592 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(heap_slot_size(HEAP_COUNT - 1)));
9593 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
9594 if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
9595 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
9596 }
9597 OBJ_FREEZE(gc_constants);
9598 /* Internal constants in the garbage collector. */
9599 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
9600
9601 if (GC_COMPACTION_SUPPORTED) {
9602 rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
9603 rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
9604 rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
9605 rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
9606 rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
9607 }
9608 else {
9609 rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
9610 rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
9611 rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
9612 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
9613 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
9614 }
9615
9616#if GC_DEBUG_STRESS_TO_CLASS
9617 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
9618 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
9619#endif
9620
9621 /* internal methods */
9622 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
9623
9624#if MALLOC_ALLOCATED_SIZE
9625 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
9626 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
9627#endif
9628
9629 VALUE rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
9630 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
9631 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
9632 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
9633 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
9634 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
9635 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
9636 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
9637 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
9638
9639 {
9640 VALUE opts;
9641 /* \GC build options */
9642 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
9643#define OPT(o) if (o) rb_ary_push(opts, rb_interned_str(#o, sizeof(#o) - 1))
9644 OPT(GC_DEBUG);
9645 OPT(USE_RGENGC);
9646 OPT(RGENGC_DEBUG);
9647 OPT(RGENGC_CHECK_MODE);
9648 OPT(RGENGC_PROFILE);
9649 OPT(RGENGC_ESTIMATE_OLDMALLOC);
9650 OPT(GC_PROFILE_MORE_DETAIL);
9651 OPT(GC_ENABLE_LAZY_SWEEP);
9652 OPT(CALC_EXACT_MALLOC_SIZE);
9653 OPT(MALLOC_ALLOCATED_SIZE);
9654 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
9655 OPT(GC_PROFILE_DETAIL_MEMORY);
9656 OPT(GC_COMPACTION_SUPPORTED);
9657#undef OPT
9658 OBJ_FREEZE(opts);
9659 }
9660}
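/* Editorial note: the constants and options registered above are observable
 * from Ruby; a hypothetical inspection session (exact values depend on the
 * build and platform):
 *
 *   GC::INTERNAL_CONSTANTS[:RVALUE_OLD_AGE]  # generational promotion age
 *   GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]  # bytes per heap page
 *   GC::OPTS.include?("GC_DEBUG")            # true iff GC_DEBUG was compiled in
 */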