#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc
#include "ruby/internal/config.h"
#define sighandler_t ruby_sighandler_t
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#ifndef HAVE_MALLOC_USABLE_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) malloc_size(a)
#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_H)
# elif defined(HAVE_MALLOC_NP_H)
# include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
# include <malloc/malloc.h>
#ifdef HAVE_MALLOC_TRIM
# include <emscripten/emmalloc.h>
#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#if defined _WIN32 || defined __CYGWIN__
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#include <emscripten.h>
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
# include <mach/task.h>
# include <mach/mach_init.h>
# include <mach/mach_port.h>
#include "debug_counter.h"
#include "eval_intern.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
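/*
 * The two helpers below cooperate: gc_compute_malloc_offset() probes how many
 * bytes the system allocator rounds small requests up by, and
 * rb_malloc_grow_capa() uses that offset to pick a grown element count whose
 * byte size lands on a power-of-two malloc bucket, wasting little padding.
 * A minimal usage sketch (hypothetical caller, not an API defined here):
 *
 *     size_t new_capa = rb_malloc_grow_capa(old_capa, sizeof(VALUE));
 *     VALUE *new_buf = realloc(old_buf, new_capa * sizeof(VALUE));
 *     // new_capa is always >= old_capa; rb_bug() fires otherwise.
 */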
static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
gc_compute_malloc_offset(void)
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
gc_compute_malloc_offset(void)
rb_malloc_grow_capa(size_t current, size_t type_size)
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    current_capacity *= type_size;
    size_t new_capacity = (current_capacity * 2);
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };
static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
size_mul_or_raise(size_t x, size_t y, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIuSIZE
            x, y, (size_t)SIZE_MAX);
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
    return size_mul_or_raise(x, y, exc);
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIuSIZE
            x, y, z, (size_t)SIZE_MAX);
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    return size_mul_add_or_raise(x, y, z, exc);
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIdSIZE
            x, y, z, w, (size_t)SIZE_MAX);
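/*
 * The rbimpl_size_mul_overflow_tag pair carries { left = "did it overflow?",
 * right = result }, keeping the arithmetic separate from the error policy
 * (raise outside GC, rb_bug() while GC is running).  A hedged usage sketch
 * (hypothetical caller): sizing an n-element VALUE table without letting the
 * multiplication wrap silently:
 *
 *     size_t bytes = size_mul_or_raise(n, sizeof(VALUE), rb_eArgError);
 */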
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
volatile VALUE rb_gc_guarded_val;
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
    rb_gc_guarded_val = val;
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0
#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024)
#ifndef GC_CAN_COMPILE_COMPACTION
# define GC_CAN_COMPILE_COMPACTION 0
# define GC_CAN_COMPILE_COMPACTION 1
#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
    size_t size_pool_init_slots[SIZE_POOL_COUNT];
    size_t heap_free_slots;
    double growth_factor;
    size_t growth_max_slots;
    double heap_free_slots_min_ratio;
    double heap_free_slots_goal_ratio;
    double heap_free_slots_max_ratio;
    double uncollectible_wb_unprotected_objects_limit_ratio;
    double oldobject_limit_factor;
    size_t malloc_limit_min;
    size_t malloc_limit_max;
    double malloc_limit_growth_factor;
    size_t oldmalloc_limit_min;
    size_t oldmalloc_limit_max;
    double oldmalloc_limit_growth_factor;
    GC_HEAP_GROWTH_FACTOR,
    GC_HEAP_GROWTH_MAX_SLOTS,
    GC_HEAP_FREE_SLOTS_MIN_RATIO,
    GC_HEAP_FREE_SLOTS_GOAL_RATIO,
    GC_HEAP_FREE_SLOTS_MAX_RATIO,
    GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
    GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
    GC_MALLOC_LIMIT_GROWTH_FACTOR,
    GC_OLDMALLOC_LIMIT_MIN,
    GC_OLDMALLOC_LIMIT_MAX,
    GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
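/*
 * These compile-time defaults are only a starting point: at process start the
 * GC reads RUBY_GC_* environment variables and overwrites the corresponding
 * gc_params fields.  A sketch (not an exhaustive list of recognized names):
 *
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.2 RUBY_GC_MALLOC_LIMIT=67108864 ruby app.rb
 */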
#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
#elif defined(HAVE_VA_ARGS_MACRO)
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
# define RGENGC_DEBUG_ENABLED(level) 0
int ruby_rgengc_debug;
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
    GPR_FLAG_NONE = 0x000,
    GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
    GPR_FLAG_MAJOR_MASK = 0x0ff,
    GPR_FLAG_NEWOBJ = 0x100,
    GPR_FLAG_MALLOC = 0x200,
    GPR_FLAG_METHOD = 0x400,
    GPR_FLAG_CAPI = 0x800,
    GPR_FLAG_STRESS = 0x1000,
    GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
    GPR_FLAG_HAVE_FINALIZE = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK = 0x8000,
    GPR_FLAG_FULL_MARK = 0x10000,
    GPR_FLAG_COMPACT = 0x20000,
        (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
         GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
} gc_profile_record_flag;
    double gc_invoke_time;
    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;
    size_t moved_objects;
#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;
    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t allocate_increase;
    size_t allocate_limit;
    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
    shape_id_t original_shape_id;
#define RMOVED(obj) ((struct RMoved *)(obj))
    uint32_t _ractor_belonging_id;
# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
# define RVALUE_OVERHEAD 0
typedef uintptr_t bits_t;
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
#define popcount_bits rb_popcount_intptr
#define STACK_CHUNK_SIZE 500
    VALUE data[STACK_CHUNK_SIZE];
    size_t unused_cache_size;
#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
    struct ccan_list_head pages;
    uintptr_t compact_cursor_index;
    size_t allocatable_pages;
    size_t total_allocated_pages;
    size_t total_freed_pages;
    size_t force_major_gc_count;
    size_t force_incremental_marking_finish_count;
    size_t total_allocated_objects;
    size_t total_freed_objects;
#if MALLOC_ALLOCATED_SIZE
        size_t allocated_size;
        unsigned int mode : 2;
        unsigned int immediate_sweep : 1;
        unsigned int dont_gc : 1;
        unsigned int dont_incremental : 1;
        unsigned int during_gc : 1;
        unsigned int during_compacting : 1;
        unsigned int during_reference_updating : 1;
        unsigned int gc_stressful: 1;
        unsigned int has_newobj_hook: 1;
        unsigned int during_minor_gc : 1;
        unsigned int during_incremental_marking : 1;
        unsigned int measure_gc : 1;
    VALUE next_object_id;
        size_t allocated_pages;
        size_t allocatable_pages;
        size_t sorted_length;
        size_t freeable_pages;
        VALUE deferred_final;
    unsigned int latest_gc_info;
#if GC_PROFILE_MORE_DETAIL
        size_t minor_gc_count;
        size_t major_gc_count;
        size_t compact_count;
        size_t read_barrier_faults;
#if RGENGC_PROFILE > 0
        size_t total_generated_normal_object_count;
        size_t total_generated_shady_object_count;
        size_t total_shade_operation_count;
        size_t total_promoted_count;
        size_t total_remembered_normal_object_count;
        size_t total_remembered_shady_object_count;
#if RGENGC_PROFILE >= 2
        size_t generated_normal_object_count_types[RUBY_T_MASK];
        size_t generated_shady_object_count_types[RUBY_T_MASK];
        size_t remembered_normal_object_count_types[RUBY_T_MASK];
        size_t remembered_shady_object_count_types[RUBY_T_MASK];
        double gc_sweep_start_time;
        size_t total_allocated_objects_at_gc_start;
        size_t heap_used_at_gc_start;
        uint64_t marking_time_ns;
        uint64_t sweeping_time_ns;
        struct timespec sweeping_start_time;
        size_t weak_references_count;
        size_t retained_weak_references_count;
    VALUE gc_stress_mode;
        size_t last_major_gc;
        size_t uncollectible_wb_unprotected_objects;
        size_t uncollectible_wb_unprotected_objects_limit;
        size_t old_objects_limit;
#if RGENGC_ESTIMATE_OLDMALLOC
        size_t oldmalloc_increase;
        size_t oldmalloc_increase_limit;
#if RGENGC_CHECK_MODE >= 2
        size_t considered_count_table[T_MASK];
        size_t moved_count_table[T_MASK];
        size_t moved_up_count_table[T_MASK];
        size_t moved_down_count_table[T_MASK];
        gc_compact_compare_func compare_func;
#if GC_DEBUG_STRESS_TO_CLASS
    VALUE stress_to_class;
    rb_darray(VALUE *) weak_references;
#ifndef HEAP_PAGE_ALIGN_LOG
#define HEAP_PAGE_ALIGN_LOG 16
#define BASE_SLOT_SIZE sizeof(RVALUE)
#define CEILDIV(i, mod) roomof(i, mod)
    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
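/*
 * Worked numbers (assuming a 64-bit build with a 40-byte RVALUE, i.e.
 * BASE_SLOT_SIZE == 40, and the default HEAP_PAGE_ALIGN_LOG of 16):
 *
 *     HEAP_PAGE_SIZE         = 1 << 16                  = 65536 bytes (64 KiB)
 *     HEAP_PAGE_OBJ_LIMIT    = (65536 - 8) / 40          = 1638 base slots
 *     HEAP_PAGE_BITMAP_LIMIT = ceil(ceil(65536/40) / 64) = 26 words
 *     HEAP_PAGE_BITMAP_SIZE  = 8 * 26                    = 208 bytes per bitmap
 */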
#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif defined(__wasm__)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif HAVE_CONST_PAGE_SIZE
static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
#elif defined(PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
static bool heap_page_alloc_use_mmap;
#define RVALUE_AGE_BIT_COUNT 2
#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
    unsigned int before_sweep : 1;
    unsigned int has_remembered_objects : 1;
    unsigned int has_uncollectible_wb_unprotected_objects : 1;
    unsigned int in_tomb : 1;
    struct ccan_list_node page_node;
    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
asan_lock_freelist(struct heap_page *page)
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
asan_unlock_freelist(struct heap_page *page)
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
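/*
 * Worked example of the address-to-bit mapping (assuming 64-bit bits_t and a
 * 40-byte BASE_SLOT_SIZE): an object whose offset inside its 64 KiB page body
 * is 0x5000 (20480 bytes) gives
 *
 *     NUM_IN_PAGE(p)   = 20480 / 40 = 512
 *     BITMAP_INDEX(p)  = 512 / 64   = 8
 *     BITMAP_OFFSET(p) = 512 % 64   = 0
 *     BITMAP_BIT(p)    = (bits_t)1 << 0
 *
 * so MARK_IN_BITMAP(bits, p) sets bit 0 of word 8 in that page's bitmap.
 */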
#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
#define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
#define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
#define RVALUE_OLD_AGE 3
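/*
 * Ages are stored as 2-bit fields packed into the age_bits words, so on a
 * 64-bit build each bits_t word holds 64 / 2 = 32 object ages.  An object is
 * promoted to the old generation once its age reaches RVALUE_OLD_AGE (3),
 * i.e. after surviving three minor GCs.  For the slot with NUM_IN_PAGE == 512
 * from the example above:
 *
 *     RVALUE_AGE_BITMAP_INDEX  = 512 / 32       = 16
 *     RVALUE_AGE_BITMAP_OFFSET = (512 % 32) * 2 = 0
 */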
RVALUE_AGE_GET(VALUE obj)
    bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
    return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
RVALUE_AGE_SET(VALUE obj, int age)
    bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
    age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
    age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
    if (age == RVALUE_OLD_AGE) {
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)
#define unless_objspace(objspace) \
    rb_objspace_t *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->objspace; \
#define ruby_initial_gc_stress gc_params.gc_stress
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length objspace->heap_pages.sorted_length
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define size_pools objspace->size_pools
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
#define global_list objspace->global_list
#define ruby_gc_stressful objspace->flags.gc_stressful
#define ruby_gc_stress_mode objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class objspace->stress_to_class
#define set_stress_to_class(c) (stress_to_class = (c))
#define stress_to_class (objspace, 0)
#define set_stress_to_class(c) (objspace, (c))
#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
#define dont_gc_on() (objspace->flags.dont_gc = 1)
#define dont_gc_off() (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
      case gc_mode_marking:
      case gc_mode_sweeping:
      case gc_mode_compacting:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pools[i].allocatable_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_allocated_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_freed_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_allocated_objects;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_freed_objects;
#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
#define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
#if SIZEOF_LONG == SIZEOF_VOIDP
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
# error not supported
#define RANY(o) ((RVALUE*)(o))
    void (*dfree)(void *);
#define RZOMBIE(o) ((struct RZombie *)(o))
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
int ruby_disable_gc = 0;
int ruby_enable_autocompact = 0;
#if RGENGC_CHECK_MODE
gc_compact_compare_func ruby_autocompact_compare_func;
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);
void rb_gcdebug_print_obj_condition(VALUE obj);
NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));
static int garbage_collect(rb_objspace_t *, unsigned int reason);
static int gc_start(rb_objspace_t *objspace, unsigned int reason);
enum gc_enter_event {
    gc_enter_event_start,
    gc_enter_event_continue,
    gc_enter_event_rest,
    gc_enter_event_finalizer,
    gc_enter_event_rb_memerror,
static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
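/*
 * During compaction a moved object's old slot is rewritten into a T_MOVED
 * forwarding cell whose destination field holds the new address; these
 * macros patch a single reference in place when it still points at the old
 * slot.  A hedged sketch of the pattern (hypothetical fields, not code from
 * this file):
 *
 *     // inside some rb_*_update_references() callback:
 *     UPDATE_IF_MOVED(objspace, ptr->klass);
 *     TYPED_UPDATE_IF_MOVED(objspace, struct some_entry *, ptr->entry);
 */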
#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);
static void gc_finalize_deferred(void *dmy);
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long long val = __builtin_ppc_get_timebase();
#elif defined(__POWERPC__) && defined(__APPLE__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long int upper, lower, tmp;
    # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
    # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
    } while (tmp != upper);
    return ((tick_t)upper << 32) | lower;
#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"
typedef clock_t tick_t;
#define PRItick "llu"
typedef double tick_t;
#define PRItick "4.9f"
    return getrusage_time();
#error "choose tick type"
#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
#define MEASURE_LINE(expr) expr
asan_unpoison_object_temporary(VALUE obj)
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
asan_poison_object_restore(VALUE obj, void *ptr)
        asan_poison_object(obj);
#define asan_unpoisoning_object(obj) \
    for (void *poisoned = asan_unpoison_object_temporary(obj), \
              *unpoisoning = &poisoned; \
         unpoisoning = asan_poison_object_restore(obj, poisoned))
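/*
 * The macro above is a run-once for loop: it unpoisons the object's slot on
 * entry, runs its body, then re-poisons the slot on the way out.  Typical use
 * (a sketch of the pattern used later in this file):
 *
 *     asan_unpoisoning_object(obj) {
 *         // obj's slot may be read here even if it normally sits poisoned
 *         // on a freelist under AddressSanitizer.
 *     }
 */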
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
check_rvalue_consistency_force(const VALUE obj, int terminate)
    RB_VM_LOCK_ENTER_NO_BARRIER();
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                if (page->start <= (uintptr_t)obj &&
                    (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                    fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                            (void *)obj, (void *)page);
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
        const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
        const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
        const int age = RVALUE_AGE_GET((VALUE)obj);
        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
        obj_memsize_of((VALUE)obj, FALSE);
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
        if (!is_full_marking(objspace)) {
            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                        obj_info(obj), age);
            if (remembered_bit && age != RVALUE_OLD_AGE) {
                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                        obj_info(obj), age);
        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
    RB_VM_LOCK_LEAVE_NO_BARRIER();
    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);
    void *poisoned = asan_unpoison_object_temporary(obj);
        asan_poison_object(obj);
RVALUE_MARKED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;
RVALUE_PINNED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;
RVALUE_WB_UNPROTECTED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
RVALUE_MARKING(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
RVALUE_REMEMBERED(VALUE obj)
    check_rvalue_consistency(obj);
    return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
RVALUE_UNCOLLECTIBLE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
RVALUE_OLD_P(VALUE obj)
    check_rvalue_consistency(obj);
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
    objspace->rgengc.old_objects++;
#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
    int age = RVALUE_AGE_GET((VALUE)obj);
    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    RVALUE_AGE_SET(obj, age);
    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));
    RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
    check_rvalue_consistency(obj);
RVALUE_AGE_RESET(VALUE obj)
    RVALUE_AGE_SET(obj, 0);
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));
    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
    RVALUE_AGE_RESET(obj);
    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;
    check_rvalue_consistency(obj);
RVALUE_BLACK_P(VALUE obj)
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
RVALUE_GREY_P(VALUE obj)
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
RVALUE_WHITE_P(VALUE obj)
    return RVALUE_MARKED(obj) == FALSE;
    return calloc(1, n);
rb_objspace_alloc(void)
    objspace->flags.measure_gc = 1;
    malloc_limit = gc_params.malloc_limit_min;
    if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
        rb_bug("Could not preregister postponed job for GC");
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
        ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
    rb_darray_make_without_gc(&objspace->weak_references, 0);
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");
    free(objspace->profile.records);
    objspace->profile.records = NULL;
    for (list = global_list; list; list = next) {
    if (heap_pages_sorted) {
        size_t total_heap_pages = heap_allocated_pages;
        for (i = 0; i < total_heap_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        free(heap_pages_sorted);
        heap_allocated_pages = 0;
        heap_pages_sorted_length = 0;
        heap_pages_lomem = 0;
        heap_pages_himem = 0;
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
            SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);
    free_stack_chunks(&objspace->mark_stack);
    mark_stack_free_cache(&objspace->mark_stack);
    rb_darray_free_without_gc(objspace->weak_references);
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
    heap_pages_sorted_length = next_length;
    size_t next_length = heap_allocatable_pages(objspace);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
    if (next_length > heap_pages_sorted_length) {
        heap_pages_expand_sorted_to(objspace, next_length);
    GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
    size_pool->allocatable_pages = s;
    heap_pages_expand_sorted(objspace);
    ASSERT_vm_locking();
    asan_unpoison_object(obj, false);
    asan_unlock_freelist(page);
    p->as.free.flags = 0;
    p->as.free.next = page->freelist;
    asan_lock_freelist(page);
    RVALUE_AGE_RESET(obj);
    if (RGENGC_CHECK_MODE &&
        !(page->start <= (uintptr_t)obj &&
          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
          obj % BASE_SLOT_SIZE == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);
    page->free_next = heap->free_pages;
    heap->free_pages = page;
    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
    asan_lock_freelist(page);
    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);
    page->free_next = heap->pooled_pages;
    heap->pooled_pages = page;
    objspace->rincgc.pooled_slots += page->free_slots;
    asan_lock_freelist(page);
    ccan_list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;
static void rb_aligned_free(void *ptr, size_t size);
    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(page_body, HEAP_PAGE_SIZE)) {
            rb_bug("heap_page_body_free: munmap failed");
        rb_aligned_free(page_body, HEAP_PAGE_SIZE);
    heap_allocated_pages--;
    page->size_pool->total_freed_pages++;
    heap_page_body_free(GET_PAGE_BODY(page->start));
    bool has_pages_in_tomb_heap = FALSE;
    for (i = 0; i < SIZE_POOL_COUNT; i++) {
        if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
            has_pages_in_tomb_heap = TRUE;
    if (has_pages_in_tomb_heap) {
        for (i = j = 0; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];
            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
                heap_page_free(objspace, page);
                heap_pages_sorted[j] = page;
            struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
            uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
            GC_ASSERT(himem <= heap_pages_himem);
            heap_pages_himem = himem;
            struct heap_page *lopage = heap_pages_sorted[0];
            uintptr_t lomem = (uintptr_t)lopage->start;
            GC_ASSERT(lomem >= heap_pages_lomem);
            heap_pages_lomem = lomem;
        GC_ASSERT(j == heap_allocated_pages);
heap_page_body_allocate(void)
    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
        char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
        char *aligned = ptr + HEAP_PAGE_ALIGN;
        aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for start");
        size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for end");
        page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
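/*
 * The mmap branch obtains an aligned page body without aligned_alloc: it
 * over-maps HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE bytes (128 KiB with the
 * defaults), rounds the returned pointer up to the next HEAP_PAGE_ALIGN
 * boundary, then munmaps the unused slack before and after.  For example, if
 * mmap returns an address ending in 0x3000, start_out_of_range_size is
 * 0x10000 - 0x3000 = 0xD000 and the end slack is 0x3000, leaving exactly one
 * aligned 64 KiB body mapped.
 */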
    uintptr_t start, end, p;
    uintptr_t hi, lo, mid;
    size_t stride = size_pool->slot_size;
    unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
    if (page_body == 0) {
    page = calloc1(sizeof(struct heap_page));
        heap_page_body_free(page_body);
    if (start % BASE_SLOT_SIZE != 0) {
        int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
        start = start + delta;
        GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
        if (NUM_IN_PAGE(start) == 1) {
            start += stride - BASE_SLOT_SIZE;
        GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
        limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
    end = start + (limit * (int)stride);
    hi = (uintptr_t)heap_allocated_pages;
        mid = (lo + hi) / 2;
        mid_page = heap_pages_sorted[mid];
        if ((uintptr_t)mid_page->start < start) {
        else if ((uintptr_t)mid_page->start > start) {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
    if (hi < (uintptr_t)heap_allocated_pages) {
        MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
    heap_pages_sorted[hi] = page;
    heap_allocated_pages++;
    GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
    size_pool->total_allocated_pages++;
    if (heap_allocated_pages > heap_pages_sorted_length) {
        rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
               heap_allocated_pages, heap_pages_sorted_length);
    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
    if (heap_pages_himem < end) heap_pages_himem = end;
    page->start = start;
    page->total_slots = limit;
    page->slot_size = size_pool->slot_size;
    page->size_pool = size_pool;
    page_body->header.page = page;
    for (p = start; p != end; p += stride) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    page->free_slots = limit;
    asan_lock_freelist(page);
    ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
        asan_unlock_freelist(page);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, &size_pool->tomb_heap, page);
            asan_lock_freelist(page);
    const char *method = "recycle";
    size_pool->allocatable_pages--;
    page = heap_page_resurrect(objspace, size_pool);
        page = heap_page_allocate(objspace, size_pool);
        method = "allocate";
    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "tomb->total_pages: %"PRIdSIZE"\n",
                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
    GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
    page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
    ccan_list_add_tail(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
    struct heap_page *page = heap_page_create(objspace, size_pool);
    heap_add_page(objspace, size_pool, heap, page);
    heap_add_freepage(heap, page);
#if GC_CAN_COMPILE_COMPACTION
    size_pool_allocatable_pages_set(objspace, size_pool, add);
    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, size_pool, heap);
    GC_ASSERT(size_pool->allocatable_pages == 0);
    size_t multiple = size_pool->slot_size / BASE_SLOT_SIZE;
    size_t slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1;
    return CEILDIV(slots, slots_per_page);
    size_t size_pool_idx = size_pool - size_pools;
    size_t init_slots = gc_params.size_pool_init_slots[size_pool_idx];
    return slots_to_pages_for_size_pool(objspace, size_pool, init_slots);
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    if (goal_ratio == 0.0) {
        next_used = (size_t)(used * gc_params.growth_factor);
    else if (total_slots == 0) {
        next_used = minimum_pages_for_size_pool(objspace, size_pool);
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;
        next_used = (size_t)(f * used);
                  "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                  " G(%1.2f), f(%1.2f),"
                  " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                  free_slots, total_slots, free_slots/(double)total_slots,
                  goal_ratio, f, used, next_used);
    if (gc_params.growth_max_slots > 0) {
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;
    size_t extend_page_count = next_used - used;
    if (extend_page_count == 0) extend_page_count = 1;
    return extend_page_count;
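/*
 * Worked numbers for the growth factor f above, with the default
 * GC_HEAP_FREE_SLOTS_GOAL_RATIO of 0.40: suppose total_slots = 10000,
 * free_slots = 2000 and used = 100 pages.  Then
 *
 *     f = (10000 - 2000) / ((1 - 0.40) * 10000) = 8000 / 6000 ~= 1.33
 *
 * which stays inside the [1.1, GC_HEAP_GROWTH_FACTOR] clamp, so next_used is
 * about 133 pages: the heap grows by roughly 33 pages so that close to 40%
 * of its slots are free after the extension.
 */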
    if (size_pool->allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
                  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
        GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
        GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
        heap_assign_page(objspace, size_pool, heap);
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_continue, &lock_lev);
    if (is_incremental_marking(objspace)) {
        if (gc_marks_continue(objspace, size_pool, heap)) {
    if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
        gc_sweep_continue(objspace, size_pool, heap);
    gc_exit(objspace, gc_enter_event_continue, &lock_lev);
    GC_ASSERT(heap->free_pages == NULL);
    gc_continue(objspace, size_pool, heap);
    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) ||
         (heap_increment(objspace, size_pool, heap) == FALSE))) {
        if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
            gc_continue(objspace, size_pool, heap);
            if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
                if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
                    rb_bug("cannot create a new page after GC");
                if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
                    gc_continue(objspace, size_pool, heap);
                    if (heap->free_pages == NULL &&
                        !heap_increment(objspace, size_pool, heap)) {
                        rb_bug("cannot create a new page after major GC");
    GC_ASSERT(heap->free_pages != NULL);
    if (UNLIKELY(!ec->cfp)) return;
    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
#define gc_event_newobj_hook_needed_p(objspace) ((objspace)->flags.has_newobj_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
#define gc_event_hook_prep(objspace, event, data, prep) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
#if !__has_feature(memory_sanitizer)
    p->as.basic.flags = flags;
        RVALUE_AGE_SET_CANDIDATE(objspace, obj);
#if RACTOR_CHECK_MODE
    rb_ractor_setup_belonging(obj);
#if RGENGC_CHECK_MODE
    p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
    RB_VM_LOCK_ENTER_NO_BARRIER();
        check_rvalue_consistency(obj);
        GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
        GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
        GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
        GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
        if (RVALUE_REMEMBERED((VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
    RB_VM_LOCK_LEAVE_NO_BARRIER();
    if (UNLIKELY(wb_protected == FALSE)) {
        ASSERT_vm_locking();
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
    RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
rb_gc_obj_slot_size(VALUE obj)
    return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
size_pool_slot_size(unsigned char pool_id)
    GC_ASSERT(pool_id < SIZE_POOL_COUNT);
    size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
#if RGENGC_CHECK_MODE
    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
    slot_size -= RVALUE_OVERHEAD;
rb_size_pool_slot_size(unsigned char pool_id)
    return size_pool_slot_size(pool_id);
rb_gc_size_allocatable_p(size_t size)
    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
                           size_t size_pool_idx)
    RVALUE *p = size_pool_cache->freelist;
    if (is_incremental_marking(objspace)) {
        if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
            cache->incremental_mark_step_allocated_slots++;
        MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
        size_pool_cache->freelist = p->as.free.next;
        asan_unpoison_memory_region(p, stride, true);
#if RGENGC_CHECK_MODE
        GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
        MEMZERO((char *)obj, char, stride);
    ASSERT_vm_locking();
    if (heap->free_pages == NULL) {
        heap_prepare(objspace, size_pool, heap);
    page = heap->free_pages;
    heap->free_pages = page->free_next;
    GC_ASSERT(page->free_slots != 0);
    RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
    asan_unlock_freelist(page);
    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
    GC_ASSERT(size_pool_cache->freelist == NULL);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);
    size_pool_cache->using_page = page;
    size_pool_cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;
    asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
    asan_poison_object((VALUE)size_pool_cache->freelist);
    p->as.values.v1 = v1;
    p->as.values.v2 = v2;
    p->as.values.v3 = v3;
size_pool_idx_for_size(size_t size)
    size += RVALUE_OVERHEAD;
    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
    if (size_pool_idx >= SIZE_POOL_COUNT) {
        rb_bug("size_pool_idx_for_size: allocation size too large "
               "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
#if RGENGC_CHECK_MODE
    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
    return size_pool_idx;
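/*
 * Worked example (assuming RVALUE_OVERHEAD == 0 and a 40-byte BASE_SLOT_SIZE,
 * with the usual five size pools of 40/80/160/320/640-byte slots): a request
 * for 100 bytes gives
 *
 *     slot_count    = CEILDIV(100, 40)      = 3
 *     size_pool_idx = 64 - nlz_int64(3 - 1) = 2
 *
 * so the object goes to the 160-byte pool, the smallest slot that can hold it.
 */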
    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
    VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
    if (UNLIKELY(obj == Qfalse)) {
        bool unlock_vm = false;
            RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
            ASSERT_vm_locking();
            if (is_incremental_marking(objspace)) {
                gc_continue(objspace, size_pool, heap);
                cache->incremental_mark_step_allocated_slots = 0;
                obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
                struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
                ractor_cache_set_page(cache, size_pool_idx, page);
                obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
                GC_ASSERT(obj != Qfalse);
            RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
    size_pool->total_allocated_objects++;
newobj_zero_slot(VALUE obj)
    memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
    RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
        if (UNLIKELY(during_gc || ruby_gc_stressful)) {
                rb_bug("object allocation during garbage collection phase");
            if (ruby_gc_stressful) {
                if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
        obj = newobj_alloc(objspace, cr, size_pool_idx, true);
        newobj_init(klass, flags, wb_protected, objspace, obj);
    RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
    return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
    return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
    RB_DEBUG_COUNTER_INC(obj_newobj);
    (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
    if (UNLIKELY(stress_to_class)) {
        for (i = 0; i < cnt; ++i) {
            if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
    size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
        flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
    if (!UNLIKELY(during_gc ||
                  ruby_gc_stressful ||
                  gc_event_newobj_hook_needed_p(objspace)) &&
        obj = newobj_alloc(objspace, cr, size_pool_idx, false);
        newobj_init(klass, flags, wb_protected, objspace, obj);
        RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
        obj = wb_protected ?
          newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
          newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
    VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
    return newobj_fill(obj, v1, v2, v3);
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
    return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
    return newobj_of(GET_RACTOR(), 0, T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
    GC_ASSERT(flags & ROBJECT_EMBED);
    uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
    if (!rb_gc_size_allocatable_p(size)) {
        size = sizeof(struct RObject);
    VALUE obj = newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, wb_protected, size);
        ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
    for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
3016#define UNEXPECTED_NODE(func) \
3017 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
3018 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
3021rb_imemo_name(enum imemo_type type)
3025#define IMEMO_NAME(x) case imemo_##x: return #x;
3029 IMEMO_NAME(throw_data);
3036 IMEMO_NAME(parser_strterm);
3037 IMEMO_NAME(callinfo);
3038 IMEMO_NAME(callcache);
3039 IMEMO_NAME(constcache);
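/*
 * Illustrative sketch: the same #-stringification pattern IMEMO_NAME uses
 * above, applied to a hypothetical enum, to show how one case per value
 * yields that value's name as a string.
 */
enum color_example { color_red, color_green };
#define COLOR_NAME_EXAMPLE(x) case color_##x: return #x;

static const char *
color_name_example(enum color_example c)
{
    switch (c) {
        COLOR_NAME_EXAMPLE(red);
        COLOR_NAME_EXAMPLE(green);
    }
    return "?";
}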
3050 size_t size = RVALUE_SIZE;
3052 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, TRUE, size);
3060 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, FALSE, size);
3064rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
3066 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
3076imemo_memsize(VALUE obj)
3079 switch (imemo_type(obj)) {
3081 size += sizeof(RANY(obj)->as.imemo.ment.def);
3084 size += rb_iseq_memsize((rb_iseq_t *)obj);
3087 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
3090 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3093 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3097 case imemo_throw_data:
3100 case imemo_parser_strterm:
3113 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
3114 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
3120rb_class_allocate_instance(VALUE klass)
3126rb_data_object_check(VALUE klass)
3128 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
3130 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3138 if (klass) rb_data_object_check(klass);
3145 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3153 RBIMPL_NONNULL_ARG(type);
3154 if (klass) rb_data_object_check(klass);
3156 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
3162 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
3163 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
3166 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
3172 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
3173 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
3174 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
3177 size_t embed_size = offsetof(struct RTypedData, data) + size;
3178 if (rb_gc_size_allocatable_p(embed_size)) {
3179 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
3180 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
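/*
 * Illustrative sketch: computing an "embedded" allocation size with
 * offsetof(), as the embeddable-TypedData path above does.  The struct here
 * is a hypothetical stand-in for struct RTypedData.
 */
#include <stddef.h>

struct typed_box_example {
    unsigned long flags;
    const void *type;
    char data[1];   /* payload placed directly after the header fields */
};

static size_t
embed_size_example(size_t payload_size)
{
    /* header bytes up to the payload, plus the payload itself */
    return offsetof(struct typed_box_example, data) + payload_size;
}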
3191rb_objspace_data_type_memsize(VALUE obj)
3196 const void *ptr = RTYPEDDATA_GET_DATA(obj);
3198 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3199#ifdef HAVE_MALLOC_USABLE_SIZE
3200 size += malloc_usable_size((void *)ptr);
3204 if (ptr && type->function.dsize) {
3205 size += type->function.dsize(ptr);
3213rb_objspace_data_type_name(VALUE obj)
3224ptr_in_page_body_p(const void *ptr, const void *memb)
3227 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3229 if ((uintptr_t)ptr >= p_body) {
3230 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3243 if (ptr < (uintptr_t)heap_pages_lomem ||
3244 ptr > (uintptr_t)heap_pages_himem) {
3248 res = bsearch((void *)ptr, heap_pages_sorted,
3249 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3250 ptr_in_page_body_p);
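/*
 * Illustrative sketch: the bsearch() comparator contract the call above
 * relies on.  The key is the raw pointer; each member of the sorted array is
 * a pointer to a page; the comparator returns 0 when the key falls inside
 * that page's body, negative below it, positive above it.  page_example_t
 * and its fields are hypothetical.
 */
#include <stdlib.h>
#include <stdint.h>

typedef struct { uintptr_t body_start; size_t body_size; } page_example_t;

static int
ptr_in_page_example(const void *key, const void *memb)
{
    uintptr_t p = (uintptr_t)key;
    const page_example_t *page = *(const page_example_t *const *)memb;
    if (p < page->body_start) return -1;
    if (p >= page->body_start + page->body_size) return 1;
    return 0;   /* found: the pointer lies inside this page body */
}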
3260PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3264 register uintptr_t p = (uintptr_t)ptr;
3267 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3269 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3270 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3272 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3273 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3275 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3277 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3278 if (page->flags.in_tomb) {
3282 if (p < page->start) return FALSE;
3283 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3284 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3292static enum rb_id_table_iterator_result
3293free_const_entry_i(VALUE value, void *data)
3297 return ID_TABLE_CONTINUE;
3303 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3304 rb_id_table_free(tbl);
3313 for (int i=0; i<ccs->len; i++) {
3316 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3318 if (is_pointer_to_heap(objspace, (void *)cc) &&
3319 IMEMO_TYPE_P(cc, imemo_callcache) &&
3320 cc->klass == klass) {
3325 asan_poison_object((VALUE)cc);
3330 asan_poison_object((VALUE)cc);
3334 VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
3335 vm_cc_invalidate(cc);
3337 ruby_xfree(ccs->entries);
3345 RB_DEBUG_COUNTER_INC(ccs_free);
3346 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3355static enum rb_id_table_iterator_result
3356cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3360 VM_ASSERT(vm_ccs_p(ccs));
3361 VM_ASSERT(id == ccs->cme->called_id);
3363 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3364 rb_vm_ccs_free(ccs);
3365 return ID_TABLE_DELETE;
3368 gc_mark(data->objspace, (VALUE)ccs->cme);
3370 for (int i=0; i<ccs->len; i++) {
3371 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3372 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3374 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3375 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3377 return ID_TABLE_CONTINUE;
3384 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3387 .objspace = objspace,
3390 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3394static enum rb_id_table_iterator_result
3395cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3399 VM_ASSERT(vm_ccs_p(ccs));
3400 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3401 return ID_TABLE_CONTINUE;
3407 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3411 .objspace = objspace,
3415 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3416 rb_id_table_free(cc_tbl);
3420static enum rb_id_table_iterator_result
3421cvar_table_free_i(VALUE value, void * ctx)
3423 xfree((void *) value);
3424 return ID_TABLE_CONTINUE;
3428rb_cc_table_free(VALUE klass)
3436 struct RZombie *zombie = RZOMBIE(obj);
3438 zombie->dfree = dfree;
3439 zombie->data = data;
3440 VALUE prev, next = heap_pages_deferred_final;
3442 zombie->next = prev = next;
3444 } while (next != prev);
3446 struct heap_page *page = GET_HEAP_PAGE(obj);
3447 page->final_slots++;
3448 heap_pages_final_slots++;
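/*
 * Illustrative sketch: the compare-and-swap retry loop that make_zombie()'s
 * do/while above expresses, pushing a node onto a shared singly linked list
 * of objects awaiting deferred finalization.  This uses C11 atomics and a
 * hypothetical node type, not Ruby's ATOMIC_* macros.
 */
#include <stdatomic.h>

struct node_example { struct node_example *next; };

static void
push_deferred_example(_Atomic(struct node_example *) *head, struct node_example *node)
{
    struct node_example *old = atomic_load(head);
    do {
        node->next = old;   /* link to the head we last observed */
    } while (!atomic_compare_exchange_weak(head, &old, node));
}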
3454 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3455 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3461 ASSERT_vm_locking();
3462 st_data_t o = (st_data_t)obj, id;
3467 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3469 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3472 rb_bug("Object ID seen, but not in mapping table: %s", obj_info(obj));
3481 int free_immediately = false;
3482 void (*dfree)(void *);
3485 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3486 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3489 dfree = RANY(obj)->as.data.dfree;
3494 if (!RTYPEDDATA_EMBEDDED_P(obj)) {
3496 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3499 else if (free_immediately) {
3505 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3508 make_zombie(objspace, obj, dfree, data);
3509 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3514 RB_DEBUG_COUNTER_INC(obj_data_empty);
3524 RB_DEBUG_COUNTER_INC(obj_free);
3534 rb_bug("obj_free() called for broken object");
3546 obj_free_object_id(objspace, obj);
3549 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3551#if RGENGC_CHECK_MODE
3552#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3553 CHECK(RVALUE_WB_UNPROTECTED);
3554 CHECK(RVALUE_MARKED);
3555 CHECK(RVALUE_MARKING);
3556 CHECK(RVALUE_UNCOLLECTIBLE);
3562 if (rb_shape_obj_too_complex(obj)) {
3563 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3564 st_free_table(ROBJECT_IV_HASH(obj));
3566 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3567 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3570 xfree(RANY(obj)->as.object.as.heap.ivptr);
3571 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3576 rb_id_table_free(RCLASS_M_TBL(obj));
3577 cc_table_free(objspace, obj, FALSE);
3578 if (rb_shape_obj_too_complex(obj)) {
3579 st_free_table((st_table *)RCLASS_IVPTR(obj));
3581 else if (RCLASS_IVPTR(obj)) {
3582 xfree(RCLASS_IVPTR(obj));
3585 if (RCLASS_CONST_TBL(obj)) {
3586 rb_free_const_table(RCLASS_CONST_TBL(obj));
3588 if (RCLASS_CVC_TBL(obj)) {
3589 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3590 rb_id_table_free(RCLASS_CVC_TBL(obj));
3592 rb_class_remove_subclass_head(obj);
3593 rb_class_remove_from_module_subclasses(obj);
3594 rb_class_remove_from_super_subclasses(obj);
3595 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3596 xfree(RCLASS_SUPERCLASSES(obj));
3609#if USE_DEBUG_COUNTER
3612 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3615 RB_DEBUG_COUNTER_INC(obj_hash_1);
3618 RB_DEBUG_COUNTER_INC(obj_hash_2);
3621 RB_DEBUG_COUNTER_INC(obj_hash_3);
3624 RB_DEBUG_COUNTER_INC(obj_hash_4);
3630 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3634 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3637 if (RHASH_AR_TABLE_P(obj)) {
3638 if (RHASH_AR_TABLE(obj) == NULL) {
3639 RB_DEBUG_COUNTER_INC(obj_hash_null);
3642 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3646 RB_DEBUG_COUNTER_INC(obj_hash_st);
3653 if (RANY(obj)->as.regexp.ptr) {
3654 onig_free(RANY(obj)->as.regexp.ptr);
3655 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3659 if (!rb_data_free(objspace, obj)) return false;
3664#if USE_DEBUG_COUNTER
3665 if (rm->regs.num_regs >= 8) {
3666 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3668 else if (rm->regs.num_regs >= 4) {
3669 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3671 else if (rm->regs.num_regs >= 1) {
3672 RB_DEBUG_COUNTER_INC(obj_match_under4);
3675 onig_region_free(&rm->regs, 0);
3679 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3683 if (RANY(obj)->as.file.fptr) {
3684 make_io_zombie(objspace, obj);
3685 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3690 RB_DEBUG_COUNTER_INC(obj_rational);
3693 RB_DEBUG_COUNTER_INC(obj_complex);
3699 if (RICLASS_OWNS_M_TBL_P(obj)) {
3701 rb_id_table_free(RCLASS_M_TBL(obj));
3703 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3704 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3706 rb_class_remove_subclass_head(obj);
3707 cc_table_free(objspace, obj, FALSE);
3708 rb_class_remove_from_module_subclasses(obj);
3709 rb_class_remove_from_super_subclasses(obj);
3711 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3715 RB_DEBUG_COUNTER_INC(obj_float);
3719 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3720 xfree(BIGNUM_DIGITS(obj));
3721 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3724 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3729 UNEXPECTED_NODE(obj_free);
3733 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3734 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3735 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3738 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3739 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3745 rb_gc_free_dsymbol(obj);
3746 RB_DEBUG_COUNTER_INC(obj_symbol);
3751 switch (imemo_type(obj)) {
3753 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3754 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3757 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3758 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3761 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3763 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3766 xfree(RANY(obj)->as.imemo.alloc.ptr);
3767 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3770 rb_ast_free(&RANY(obj)->as.imemo.ast);
3771 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3774 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3777 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3779 case imemo_throw_data:
3780 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3783 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3786 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3788 case imemo_parser_strterm:
3789 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3791 case imemo_callinfo:
3796 if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
3798 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3801 case imemo_callcache:
3802 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3804 case imemo_constcache:
3805 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3811 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3816 make_zombie(objspace, obj, 0, 0);
3825#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3826#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
3829object_id_cmp(st_data_t x, st_data_t y)
3831 if (RB_BIGNUM_TYPE_P(x)) {
3832 return !rb_big_eql(x, y);
3840object_id_hash(st_data_t n)
3842 if (RB_BIGNUM_TYPE_P(n)) {
3846 return st_numhash(n);
3849static const struct st_hash_type object_id_hash_type = {
3859#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3861 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3864 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3865 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3866 objspace->obj_to_id_tbl = st_init_numtable();
3868#if RGENGC_ESTIMATE_OLDMALLOC
3869 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3873 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3877 gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
3879 size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
3881 heap_pages_expand_sorted(objspace);
3883 init_mark_stack(&objspace->mark_stack);
3885 objspace->profile.invoke_time = getrusage_time();
3886 finalizer_table = st_init_numtable();
3894 gc_stress_set(objspace, ruby_initial_gc_stress);
3897typedef int each_obj_callback(void *, void *, size_t, void *);
3898typedef int each_page_callback(struct heap_page *, void *);
3900static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3901static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
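/*
 * Illustrative sketch: the shape of an each_obj_callback as declared above.
 * The callback receives a page's [start, end) address range and the slot
 * stride; returning non-zero stops iteration.  The body below only counts
 * slots and is purely hypothetical.
 */
static int
count_slots_callback_example(void *start, void *end, size_t stride, void *data)
{
    size_t *count = data;
    for (char *p = start; p < (char *)end; p += stride) {
        (*count)++;
    }
    return 0; /* non-zero would make objspace_each_objects() stop early */
}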
3905 bool reenable_incremental;
3907 each_obj_callback *each_obj_callback;
3908 each_page_callback *each_page_callback;
3911 struct heap_page **pages[SIZE_POOL_COUNT];
3912 size_t pages_counts[SIZE_POOL_COUNT];
3916objspace_each_objects_ensure(VALUE arg)
3922 if (data->reenable_incremental) {
3923 objspace->flags.dont_incremental = FALSE;
3926 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3927 struct heap_page **pages = data->pages[i];
3935objspace_each_objects_try(VALUE arg)
3941 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3943 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3945 struct heap_page **pages = malloc(size);
3946 if (!pages) rb_memerror();
3954 size_t pages_count = 0;
3955 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3956 pages[pages_count] = page;
3959 data->pages[i] = pages;
3960 data->pages_counts[i] = pages_count;
3961 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3964 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3966 size_t pages_count = data->pages_counts[i];
3967 struct heap_page **pages = data->pages[i];
3969 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3970 for (size_t i = 0; i < pages_count; i++) {
3973 if (page == NULL) break;
3977 if (pages[i] != page) continue;
3979 uintptr_t pstart = (uintptr_t)page->start;
3980 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3982 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart)) {
3983 if (data->each_obj_callback &&
3984 (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3987 if (data->each_page_callback &&
3988 (*data->each_page_callback)(page, data->data)) {
3993 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
4039rb_objspace_each_objects(each_obj_callback *callback, void *data)
4041 objspace_each_objects(&rb_objspace, callback, data, TRUE);
4049 bool reenable_incremental = FALSE;
4051 reenable_incremental = !objspace->flags.dont_incremental;
4054 objspace->flags.dont_incremental = TRUE;
4065objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
4068 .objspace = objspace,
4069 .each_obj_callback = callback,
4070 .each_page_callback = NULL,
4077objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
4080 .objspace = objspace,
4081 .each_obj_callback = NULL,
4082 .each_page_callback = callback,
4089rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4091 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4100internal_object_p(VALUE obj)
4103 void *ptr = asan_unpoison_object_temporary(obj);
4104 bool used_p = p->as.basic.flags;
4109 UNEXPECTED_NODE(internal_object_p);
4118 if (!p->as.basic.klass) break;
4120 return rb_singleton_class_internal_p(obj);
4124 if (!p->as.basic.klass) break;
4128 if (ptr || ! used_p) {
4129 asan_poison_object(obj);
4135rb_objspace_internal_object_p(VALUE obj)
4137 return internal_object_p(obj);
4141os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4146 for (; v != (VALUE)vend; v += stride) {
4147 if (!internal_object_p(v)) {
4167 rb_objspace_each_objects(os_obj_of_i, &oes);
4214 return os_obj_of(of);
4228 return rb_undefine_finalizer(obj);
4235 st_data_t data = obj;
4237 st_delete(finalizer_table, &data, 0);
4243should_be_callable(VALUE block)
4246 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
4252should_be_finalizable(VALUE obj)
4255 rb_raise(rb_eArgError, "cannot define finalizer for %s",
4262rb_define_finalizer_no_check(VALUE obj, VALUE block)
4270 if (st_lookup(finalizer_table, obj, &data)) {
4271 table = (VALUE)data;
4278 for (i = 0; i < len; i++) {
4287 rb_ary_push(table, block);
4291 RBASIC_CLEAR_CLASS(table);
4292 st_add_direct(finalizer_table, obj, table);
4368 should_be_finalizable(obj);
4373 should_be_callable(block);
4376 if (rb_callable_receiver(block) == obj) {
4377 rb_warn("finalizer references object to be finalized");
4380 return rb_define_finalizer_no_check(obj, block);
4386 should_be_finalizable(obj);
4387 should_be_callable(block);
4388 return rb_define_finalizer_no_check(obj, block);
4399 if (st_lookup(finalizer_table, obj, &data)) {
4400 table = (VALUE)data;
4401 st_insert(finalizer_table, dest, table);
4416 VALUE errinfo = ec->errinfo;
4417 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4418 rb_ec_error_print(ec, errinfo);
4426 enum ruby_tag_type state;
4437#define RESTORE_FINALIZER() (\
4438 ec->cfp = saved.cfp, \
4439 ec->cfp->sp = saved.sp, \
4440 ec->errinfo = saved.errinfo)
4442 saved.errinfo = ec->errinfo;
4443 saved.objid = rb_obj_id(obj);
4444 saved.cfp = ec->cfp;
4445 saved.sp = ec->cfp->sp;
4450 state = EC_EXEC_TAG();
4451 if (state != TAG_NONE) {
4453 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4455 for (i = saved.finished;
4457 saved.finished = ++i) {
4458 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4461#undef RESTORE_FINALIZER
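/*
 * Illustrative sketch: the "keep going after a failing finalizer" pattern
 * that run_finalizer() implements above with EC_PUSH_TAG/EC_EXEC_TAG,
 * expressed here with plain setjmp/longjmp.  A finalizer that "raises" would
 * longjmp back to the saved environment; the loop then simply continues with
 * the next entry.  Everything here is a hypothetical toy.
 */
#include <setjmp.h>

static jmp_buf finalizer_env_example;

static void
run_finalizers_example(void (*finals[])(void), int count)
{
    for (int i = 0; i < count; i++) {
        if (setjmp(finalizer_env_example) == 0) {
            finals[i]();   /* may longjmp(finalizer_env_example, 1) on error */
        }
        /* on a non-zero setjmp return we fall through to the next finalizer */
    }
}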
4467 st_data_t key, table;
4469 if (RZOMBIE(zombie)->dfree) {
4470 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4473 key = (st_data_t)zombie;
4474 if (st_delete(finalizer_table, &key, &table)) {
4475 run_finalizer(objspace, zombie, (VALUE)table);
4485 asan_unpoison_object(zombie, false);
4486 next_zombie = RZOMBIE(zombie)->next;
4487 page = GET_HEAP_PAGE(zombie);
4489 run_final(objspace, zombie);
4495 obj_free_object_id(objspace, zombie);
4498 GC_ASSERT(heap_pages_final_slots > 0);
4499 GC_ASSERT(page->final_slots > 0);
4501 heap_pages_final_slots--;
4502 page->final_slots--;
4504 heap_page_add_freeobj(objspace, page, zombie);
4505 page->size_pool->total_freed_objects++;
4509 zombie = next_zombie;
4517 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4518 finalize_list(objspace, zombie);
4526 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4527 finalize_deferred_heap_pages(objspace);
4528 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4532gc_finalize_deferred(void *dmy)
4535 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4537 finalize_deferred(objspace);
4538 ATOMIC_SET(finalizing, 0);
4553 if (is_incremental_marking(objspace)) {
4556 while (pop_mark_stack(&objspace->mark_stack, &obj));
4558 objspace->flags.during_incremental_marking = FALSE;
4561 if (is_lazy_sweeping(objspace)) {
4562 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4564 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4566 heap->sweeping_page = NULL;
4569 ccan_list_for_each(&heap->pages, page, page_node) {
4570 page->flags.before_sweep = false;
4575 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4577 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4578 rgengc_mark_and_rememberset_clear(objspace, heap);
4581 gc_mode_set(objspace, gc_mode_none);
4591force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4602bool rb_obj_is_main_ractor(VALUE gv);
4607 for (size_t i = 0; i < heap_allocated_pages; i++) {
4608 struct heap_page *page = heap_pages_sorted[i];
4609 short stride = page->slot_size;
4611 uintptr_t p = (uintptr_t)page->start;
4612 uintptr_t pend = p + page->total_slots * stride;
4613 for (; p < pend; p += stride) {
4617 if (rb_obj_is_mutex(vp) || rb_obj_is_thread(vp) || rb_obj_is_main_ractor(vp)) {
4618 obj_free(objspace, vp);
4623 obj_free(objspace, vp);
4638#if RGENGC_CHECK_MODE >= 2
4639 gc_verify_internal_consistency(objspace);
4641 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4644 finalize_deferred(objspace);
4645 GC_ASSERT(heap_pages_deferred_final == 0);
4648 objspace->flags.dont_incremental = 1;
4651 while (finalizer_table->num_entries) {
4653 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4656 st_data_t obj = (st_data_t)curr->obj;
4657 run_finalizer(objspace, curr->obj, curr->table);
4658 st_delete(finalizer_table, &obj, 0);
4671 unsigned int lock_lev;
4672 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4675 for (i = 0; i < heap_allocated_pages; i++) {
4676 struct heap_page *page = heap_pages_sorted[i];
4677 short stride = page->slot_size;
4679 uintptr_t p = (uintptr_t)page->start;
4680 uintptr_t pend = p + page->total_slots * stride;
4681 for (; p < pend; p += stride) {
4683 void *poisoned = asan_unpoison_object_temporary(vp);
4686 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4687 if (rb_obj_is_thread(vp)) break;
4688 if (rb_obj_is_mutex(vp)) break;
4689 if (rb_obj_is_fiber(vp)) break;
4690 if (rb_obj_is_main_ractor(vp)) break;
4692 obj_free(objspace, vp);
4695 obj_free(objspace, vp);
4702 if (rb_free_at_exit) {
4703 obj_free(objspace, vp);
4709 asan_poison_object(vp);
4714 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4716 finalize_deferred_heap_pages(objspace);
4718 st_free_table(finalizer_table);
4719 finalizer_table = 0;
4720 ATOMIC_SET(finalizing, 0);
4724is_swept_object(VALUE ptr)
4726 struct heap_page *page = GET_HEAP_PAGE(ptr);
4727 return page->flags.before_sweep ? FALSE : TRUE;
4734 if (!is_lazy_sweeping(objspace) ||
4735 is_swept_object(ptr) ||
4736 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4757 if (!is_garbage_object(objspace, ptr)) {
4766is_markable_object(VALUE obj)
4769 check_rvalue_consistency(obj);
4774rb_objspace_markable_object_p(VALUE obj)
4777 return is_markable_object(obj) && is_live_object(objspace, obj);
4781rb_objspace_garbage_object_p(VALUE obj)
4784 return is_garbage_object(objspace, obj);
4788rb_gc_is_ptr_to_obj(void *ptr)
4791 return is_pointer_to_heap(objspace, ptr);
4795rb_gc_id2ref_obj_tbl(VALUE objid)
4800 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4826#if SIZEOF_LONG == SIZEOF_VOIDP
4827#define NUM2PTR(x) NUM2ULONG(x)
4828#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4829#define NUM2PTR(x) NUM2ULL(x)
4837 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4838 ptr = NUM2PTR(objid);
4845 ptr = obj_id_to_ref(objid);
4846 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4849 if (!rb_static_id_valid_p(symid))
4855 if (!UNDEF_P(orig = rb_gc_id2ref_obj_tbl(objid)) &&
4856 is_live_object(objspace, orig)) {
4862 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4866 if (rb_int_ge(objid, objspace->next_object_id)) {
4867 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4870 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4878 return id2ref(objid);
4888#if SIZEOF_LONG == SIZEOF_VOIDP
4898 return get_heap_object_id(obj);
4902cached_object_id(VALUE obj)
4908 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4914 id = objspace->next_object_id;
4915 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4917 VALUE already_disabled = rb_gc_disable_no_rest();
4918 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4919 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4920 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
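/*
 * Illustrative sketch: the two-way mapping cached_object_id() maintains - an
 * object-to-id lookup so repeated calls return the same id, an id-to-object
 * lookup for id2ref, and a monotonically advancing counter.  The linear-scan
 * arrays here are hypothetical toys (no capacity or overflow handling), not
 * st_tables.
 */
#include <stdint.h>
#include <stddef.h>

#define ID_TABLE_CAP_EXAMPLE 64

static uintptr_t objs_example[ID_TABLE_CAP_EXAMPLE];
static uintptr_t ids_example[ID_TABLE_CAP_EXAMPLE];
static size_t    id_count_example;
static uintptr_t next_id_example = 8;   /* ids advance by a fixed increment */

static uintptr_t
cached_object_id_example(uintptr_t obj)
{
    for (size_t i = 0; i < id_count_example; i++) {
        if (objs_example[i] == obj) return ids_example[i];  /* obj -> id hit */
    }
    uintptr_t id = next_id_example;
    next_id_example += 8;
    objs_example[id_count_example] = obj;                    /* obj -> id */
    ids_example[id_count_example] = id;                      /* id -> obj */
    id_count_example++;
    return id;
}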
4929nonspecial_obj_id(VALUE obj)
4931#if SIZEOF_LONG == SIZEOF_VOIDP
4933#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4936# error not supported
4943 return rb_find_object_id(obj, nonspecial_obj_id);
5005 return rb_find_object_id(obj, cached_object_id);
5008static enum rb_id_table_iterator_result
5009cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
5011 size_t *total_size = data_ptr;
5013 *total_size += sizeof(*ccs);
5014 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
5015 return ID_TABLE_CONTINUE;
5021 size_t total = rb_id_table_memsize(cc_table);
5022 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
5027obj_memsize_of(VALUE obj, int use_all_types)
5036 size += rb_generic_ivar_memsize(obj);
5041 if (rb_shape_obj_too_complex(obj)) {
5042 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
5044 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
5045 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
5050 if (RCLASS_M_TBL(obj)) {
5051 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5054 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
5055 if (RCLASS_CVC_TBL(obj)) {
5056 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
5058 if (RCLASS_EXT(obj)->const_tbl) {
5059 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
5061 if (RCLASS_CC_TBL(obj)) {
5062 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5064 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
5065 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
5069 if (RICLASS_OWNS_M_TBL_P(obj)) {
5070 if (RCLASS_M_TBL(obj)) {
5071 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5074 if (RCLASS_CC_TBL(obj)) {
5075 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5079 size += rb_str_memsize(obj);
5082 size += rb_ary_memsize(obj);
5085 if (RHASH_ST_TABLE_P(obj)) {
5086 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
5088 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
5097 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
5102 size += onig_region_memsize(&rm->regs);
5107 if (RFILE(obj)->fptr) {
5108 size += rb_io_memsize(RFILE(obj)->fptr);
5115 size += imemo_memsize(obj);
5123 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
5124 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
5129 UNEXPECTED_NODE(obj_memsize_of);
5133 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
5134 RSTRUCT(obj)->as.heap.ptr) {
5135 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
5144 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5148 return size + rb_gc_obj_slot_size(obj);
5152rb_obj_memsize_of(VALUE obj)
5154 return obj_memsize_of(obj, TRUE);
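/*
 * Illustrative sketch: the accounting pattern obj_memsize_of() follows -
 * start from zero, add any out-of-line (heap) allocations owned by the
 * object, then add the fixed slot size at the end.  The struct and sizes
 * here are hypothetical.
 */
struct toy_object_example {
    int embedded;       /* payload stored inside the slot? */
    size_t heap_len;    /* element count of the out-of-line buffer */
    void *heap_ptr;     /* out-of-line buffer, if any */
};

static size_t
toy_memsize_example(const struct toy_object_example *o, size_t slot_size)
{
    size_t size = 0;
    if (!o->embedded && o->heap_ptr) {
        size += o->heap_len * sizeof(void *);  /* out-of-line buffer */
    }
    return size + slot_size;                   /* plus the slot itself */
}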
5158set_zero(st_data_t key, st_data_t val, st_data_t arg)
5162 rb_hash_aset(hash, k, INT2FIX(0));
5167type_sym(size_t type)
5170#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5239count_objects(int argc, VALUE *argv, VALUE os)
5250 if (!RB_TYPE_P(hash, T_HASH))
5254 for (i = 0; i <= T_MASK; i++) {
5258 for (i = 0; i < heap_allocated_pages; i++) {
5259 struct heap_page *page = heap_pages_sorted[i];
5260 short stride = page->slot_size;
5262 uintptr_t p = (uintptr_t)page->start;
5263 uintptr_t pend = p + page->total_slots * stride;
5264 for (;p < pend; p += stride) {
5266 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5268 void *poisoned = asan_unpoison_object_temporary(vp);
5269 if (RANY(p)->as.basic.flags) {
5277 asan_poison_object(vp);
5280 total += page->total_slots;
5284 hash = rb_hash_new();
5287 rb_hash_stlike_foreach(hash, set_zero, hash);
5292 for (i = 0; i <= T_MASK; i++) {
5310 size_t total_slots = 0;
5311 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5313 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5314 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5322 return total_allocated_objects(objspace) - total_freed_objects(objspace) - heap_pages_final_slots;
5328 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5332gc_setup_mark_bits(struct heap_page *page)
5335 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5342enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5348 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5351enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5352#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
5358 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5359 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5362 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5369 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5370 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5373 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
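/*
 * Illustrative sketch: the mprotect()-based page locking used above.  While
 * compaction may still leave stale references into a page, the page body is
 * mapped PROT_NONE; any access then faults, and the SIGSEGV/SIGBUS handler
 * installed further below unprotects the page and repairs the references.
 * This standalone snippet only shows the protect/unprotect calls, on a
 * hypothetical page size.
 */
#include <sys/mman.h>

#define PAGE_BODY_SIZE_EXAMPLE 16384

static int
lock_page_example(void *body)
{
    return mprotect(body, PAGE_BODY_SIZE_EXAMPLE, PROT_NONE) == 0;
}

static int
unlock_page_example(void *body)
{
    return mprotect(body, PAGE_BODY_SIZE_EXAMPLE, PROT_READ | PROT_WRITE) == 0;
}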
5380 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5382 struct heap_page *src_page = GET_HEAP_PAGE(src);
5390 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5392 asan_unlock_freelist(free_page);
5394 asan_lock_freelist(free_page);
5395 asan_unpoison_object(dest, false);
5401 asan_unlock_freelist(free_page);
5402 free_page->freelist = RANY(dest)->as.free.next;
5403 asan_lock_freelist(free_page);
5407 if (src_page->slot_size > free_page->slot_size) {
5408 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5410 else if (free_page->slot_size > src_page->slot_size) {
5411 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5413 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5414 objspace->rcompactor.total_moved++;
5416 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5417 gc_pin(objspace, src);
5418 free_page->free_slots--;
5426 struct heap_page *cursor = heap->compact_cursor;
5429 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5430 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5435#if GC_CAN_COMPILE_COMPACTION
5439#if defined(__MINGW32__) || defined(_WIN32)
5440# define GC_COMPACTION_SUPPORTED 1
5444# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5447#if GC_CAN_COMPILE_COMPACTION
5449read_barrier_handler(uintptr_t original_address)
5455 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5457 obj = (VALUE)address;
5463 if (page_body == NULL) {
5464 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5469 unlock_page_body(objspace, page_body);
5471 objspace->profile.read_barrier_faults++;
5473 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5479#if !GC_CAN_COMPILE_COMPACTION
5481uninstall_handlers(void)
5487install_handlers(void)
5491#elif defined(_WIN32)
5492static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5493typedef void (*signal_handler)(int);
5494static signal_handler old_sigsegv_handler;
5497read_barrier_signal(EXCEPTION_POINTERS * info)
5500 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5505 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5506 return EXCEPTION_CONTINUE_EXECUTION;
5509 return EXCEPTION_CONTINUE_SEARCH;
5514uninstall_handlers(void)
5516 signal(SIGSEGV, old_sigsegv_handler);
5517 SetUnhandledExceptionFilter(old_handler);
5521install_handlers(void)
5524 old_sigsegv_handler = signal(SIGSEGV, NULL);
5527 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5530static struct sigaction old_sigbus_handler;
5531static struct sigaction old_sigsegv_handler;
5533#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5534static exception_mask_t old_exception_masks[32];
5535static mach_port_t old_exception_ports[32];
5536static exception_behavior_t old_exception_behaviors[32];
5537static thread_state_flavor_t old_exception_flavors[32];
5538static mach_msg_type_number_t old_exception_count;
5541disable_mach_bad_access_exc(void)
5543 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5544 task_swap_exception_ports(
5545 mach_task_self(), EXC_MASK_BAD_ACCESS,
5546 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5547 old_exception_masks, &old_exception_count,
5548 old_exception_ports, old_exception_behaviors, old_exception_flavors
5553restore_mach_bad_access_exc(void)
5555 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5556 task_set_exception_ports(
5558 old_exception_masks[i], old_exception_ports[i],
5559 old_exception_behaviors[i], old_exception_flavors[i]
5566read_barrier_signal(int sig, siginfo_t * info, void * data)
5569 struct sigaction prev_sigbus, prev_sigsegv;
5570 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5571 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5574 sigset_t set, prev_set;
5576 sigaddset(&set, SIGBUS);
5577 sigaddset(&set, SIGSEGV);
5578 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5579#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5580 disable_mach_bad_access_exc();
5583 read_barrier_handler((uintptr_t)info->si_addr);
5586#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5587 restore_mach_bad_access_exc();
5589 sigaction(SIGBUS, &prev_sigbus, NULL);
5590 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5591 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5595uninstall_handlers(void)
5597#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5598 restore_mach_bad_access_exc();
5600 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5601 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5605install_handlers(void)
5607 struct sigaction action;
5608 memset(&action, 0, sizeof(struct sigaction));
5609 sigemptyset(&action.sa_mask);
5610 action.sa_sigaction = read_barrier_signal;
5611 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5613 sigaction(SIGBUS, &action, &old_sigbus_handler);
5614 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5615#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5616 disable_mach_bad_access_exc();
5624 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5626 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5627 gc_unprotect_pages(objspace, heap);
5630 uninstall_handlers();
5632 gc_update_references(objspace);
5633 objspace->profile.compact_count++;
5635 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5637 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5638 heap->compact_cursor = NULL;
5639 heap->free_pages = NULL;
5640 heap->compact_cursor_index = 0;
5643 if (gc_prof_enabled(objspace)) {
5645 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5647 objspace->flags.during_compacting = FALSE;
5660 struct heap_page * sweep_page = ctx->page;
5661 short slot_size = sweep_page->slot_size;
5662 short slot_bits = slot_size / BASE_SLOT_SIZE;
5663 GC_ASSERT(slot_bits > 0);
5667 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5669 asan_unpoison_object(vp, false);
5673 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5674#if RGENGC_CHECK_MODE
5675 if (!is_full_marking(objspace)) {
5676 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5677 if (RVALUE_REMEMBERED(vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5680 if (obj_free(objspace, vp)) {
5683 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5684 heap_page_add_freeobj(objspace, sweep_page, vp);
5685 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5694 if (objspace->flags.during_compacting) {
5700 rb_bug("T_MOVED shouldn't be seen until compaction is finished");
5702 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5704 heap_page_add_freeobj(objspace, sweep_page, vp);
5715 bitset >>= slot_bits;
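/*
 * Illustrative sketch: the bitmap-plane walk the sweep loop above performs.
 * Each bit covers one base slot; after examining an object the word is
 * shifted right by the object's width in base slots, so the low bit always
 * describes the next slot.  Everything here is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

static size_t
count_unmarked_example(uint64_t mark_bits, size_t slot_bits, size_t slots)
{
    size_t freed = 0;
    for (size_t i = 0; i < slots; i++) {
        if ((mark_bits & 1) == 0) freed++;  /* low bit describes this slot */
        mark_bits >>= slot_bits;            /* advance one object */
    }
    return freed;
}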
5722 struct heap_page *sweep_page = ctx->page;
5723 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5726 bits_t *bits, bitset;
5728 gc_report(2, objspace, "page_sweep: start.\n");
5730#if RGENGC_CHECK_MODE
5731 if (!objspace->flags.immediate_sweep) {
5732 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5735 sweep_page->flags.before_sweep = FALSE;
5736 sweep_page->free_slots = 0;
5738 p = (uintptr_t)sweep_page->start;
5739 bits = sweep_page->mark_bits;
5741 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5742 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5743 if (out_of_range_bits != 0) {
5744 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5750 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5751 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5752 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5756 bitset >>= NUM_IN_PAGE(p);
5758 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5760 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5762 for (int i = 1; i < bitmap_plane_count; i++) {
5765 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5767 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5770 if (!heap->compact_cursor) {
5771 gc_setup_mark_bits(sweep_page);
5774#if GC_PROFILE_MORE_DETAIL
5775 if (gc_prof_enabled(objspace)) {
5777 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5778 record->empty_objects += ctx->empty_slots;
5781 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5783 sweep_page->total_slots,
5784 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5786 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5787 sweep_page->size_pool->total_freed_objects += ctx->freed_slots;
5789 if (heap_pages_deferred_final && !finalizing) {
5792 gc_finalize_deferred_register(objspace);
5796#if RGENGC_CHECK_MODE
5797 short freelist_len = 0;
5798 asan_unlock_freelist(sweep_page);
5799 RVALUE *ptr = sweep_page->freelist;
5802 ptr = ptr->as.free.next;
5804 asan_lock_freelist(sweep_page);
5805 if (freelist_len != sweep_page->free_slots) {
5806 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5810 gc_report(2, objspace, "page_sweep: end.\n");
5814gc_mode_name(enum gc_mode mode)
5817 case gc_mode_none: return "none";
5818 case gc_mode_marking: return "marking";
5819 case gc_mode_sweeping: return "sweeping";
5820 case gc_mode_compacting: return "compacting";
5821 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5826gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5828#if RGENGC_CHECK_MODE
5829 enum gc_mode prev_mode = gc_mode(objspace);
5830 switch (prev_mode) {
5831 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5832 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5833 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5834 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5837 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5838 gc_mode_set(objspace, mode);
5845 asan_unlock_freelist(page);
5846 if (page->freelist) {
5847 RVALUE *p = page->freelist;
5848 asan_unpoison_object((VALUE)p, false);
5849 while (p->as.free.next) {
5851 p = p->as.free.next;
5852 asan_poison_object((VALUE)prev);
5853 asan_unpoison_object((VALUE)p, false);
5855 p->as.free.next = freelist;
5856 asan_poison_object((VALUE)p);
5859 page->freelist = freelist;
5861 asan_lock_freelist(page);
5868 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5869 heap->free_pages = NULL;
5870 heap->pooled_pages = NULL;
5871 if (!objspace->flags.immediate_sweep) {
5874 ccan_list_for_each(&heap->pages, page, page_node) {
5875 page->flags.before_sweep = TRUE;
5880#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5884#if GC_CAN_COMPILE_COMPACTION
5885static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
5886static int compare_pinned_slots(const void *left, const void *right, void *d);
5892 gc_mode_transition(objspace, gc_mode_sweeping);
5893 objspace->rincgc.pooled_slots = 0;
5895#if GC_CAN_COMPILE_COMPACTION
5896 if (objspace->flags.during_compacting) {
5897 gc_sort_heap_by_compare_func(
5899 objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
5904 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5906 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5908 gc_sweep_start_heap(objspace, heap);
5911 if (heap->sweeping_page == NULL) {
5912 GC_ASSERT(heap->total_pages == 0);
5913 GC_ASSERT(heap->total_slots == 0);
5914 gc_sweep_finish_size_pool(objspace, size_pool);
5919 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5920 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5927 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5928 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5929 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5930 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5932 size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools];
5933 size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
5940 while (swept_slots < min_free_slots &&
5941 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5942 swept_slots += resurrected_page->free_slots;
5944 heap_add_page(objspace, size_pool, heap, resurrected_page);
5945 heap_add_freepage(heap, resurrected_page);
5948 if (swept_slots < min_free_slots) {
5949 bool grow_heap = is_full_marking(objspace);
5953 if (!is_full_marking(objspace) && size_pool->allocatable_pages == 0) {
5955 bool is_growth_heap = size_pool->empty_slots == 0 || size_pool->freed_slots > size_pool->empty_slots;
5960 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE ||
5961 total_slots < init_slots) {
5964 else if (is_growth_heap) {
5965 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5966 size_pool->force_major_gc_count++;
5971 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5973 if (extend_page_count > size_pool->allocatable_pages) {
5974 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5983 gc_report(1, objspace, "gc_sweep_finish\n");
5985 gc_prof_set_heap_info(objspace);
5986 heap_pages_free_unused_pages(objspace);
5988 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5992 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5993 if (size_pool->allocatable_pages < tomb_pages) {
5994 size_pool->allocatable_pages = tomb_pages;
5997 size_pool->freed_slots = 0;
5998 size_pool->empty_slots = 0;
6000 if (!will_be_incremental_marking(objspace)) {
6001 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
6002 struct heap_page *end_page = eden_heap->free_pages;
6004 while (end_page->free_next) end_page = end_page->free_next;
6005 end_page->free_next = eden_heap->pooled_pages;
6008 eden_heap->free_pages = eden_heap->pooled_pages;
6010 eden_heap->pooled_pages = NULL;
6011 objspace->rincgc.pooled_slots = 0;
6014 heap_pages_expand_sorted(objspace);
6017 gc_mode_transition(objspace, gc_mode_none);
6019#if RGENGC_CHECK_MODE >= 2
6020 gc_verify_internal_consistency(objspace);
6027 struct heap_page *sweep_page = heap->sweeping_page;
6028 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
6029 int swept_slots = 0;
6030 int pooled_slots = 0;
6032 if (sweep_page == NULL) return FALSE;
6034#if GC_ENABLE_LAZY_SWEEP
6035 gc_prof_sweep_timer_start(objspace);
6039 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
6047 gc_sweep_page(objspace, heap, &ctx);
6048 int free_slots = ctx.freed_slots + ctx.empty_slots;
6050 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
6052 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
6053 heap_pages_freeable_pages > 0 &&
6055 heap_pages_freeable_pages--;
6058 heap_unlink_page(objspace, heap, sweep_page);
6059 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
6061 else if (free_slots > 0) {
6062 size_pool->freed_slots += ctx.freed_slots;
6063 size_pool->empty_slots += ctx.empty_slots;
6065 if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
6066 heap_add_poolpage(objspace, heap, sweep_page);
6067 pooled_slots += free_slots;
6070 heap_add_freepage(heap, sweep_page);
6071 swept_slots += free_slots;
6072 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6078 sweep_page->free_next = NULL;
6080 } while ((sweep_page = heap->sweeping_page));
6082 if (!heap->sweeping_page) {
6083 gc_sweep_finish_size_pool(objspace, size_pool);
6085 if (!has_sweeping_pages(objspace)) {
6086 gc_sweep_finish(objspace);
6090#if GC_ENABLE_LAZY_SWEEP
6091 gc_prof_sweep_timer_stop(objspace);
6094 return heap->free_pages != NULL;
6100 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6103 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6104 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6112 GC_ASSERT(dont_gc_val() == FALSE);
6113 if (!GC_ENABLE_LAZY_SWEEP) return;
6115 gc_sweeping_enter(objspace);
6117 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6119 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6121 if (size_pool == sweep_size_pool) {
6122 if (size_pool->allocatable_pages > 0) {
6123 heap_increment(objspace, size_pool, heap);
6127 gc_sweep_rest(objspace);
6134 gc_sweeping_exit(objspace);
6137#if GC_CAN_COMPILE_COMPACTION
6148 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6149 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6151 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6153 object = rb_gc_location(forwarding_object);
6155 shape_id_t original_shape_id = 0;
6157 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6160 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6164 if (original_shape_id) {
6165 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6168 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6169 orig_page->free_slots++;
6170 heap_page_add_freeobj(objspace, orig_page, object);
6172 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6177 p += BASE_SLOT_SIZE;
6187 bits_t *mark_bits, *pin_bits;
6190 mark_bits = page->mark_bits;
6191 pin_bits = page->pinned_bits;
6193 uintptr_t p = page->start;
6196 bitset = pin_bits[0] & ~mark_bits[0];
6197 bitset >>= NUM_IN_PAGE(p);
6198 invalidate_moved_plane(objspace, page, p, bitset);
6199 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6201 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6204 bitset = pin_bits[i] & ~mark_bits[i];
6206 invalidate_moved_plane(objspace, page, p, bitset);
6207 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6216 gc_mode_transition(objspace, gc_mode_compacting);
6218 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6219 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6220 ccan_list_for_each(&heap->pages, page, page_node) {
6221 page->flags.before_sweep = TRUE;
6224 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6225 heap->compact_cursor_index = 0;
6228 if (gc_prof_enabled(objspace)) {
6230 record->moved_objects = objspace->rcompactor.total_moved;
6233 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6234 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6235 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6236 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6247 gc_sweeping_enter(objspace);
6249 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6251 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6253 gc_sweep_start(objspace);
6254 if (objspace->flags.during_compacting) {
6255 gc_sweep_compact(objspace);
6258 if (immediate_sweep) {
6259#if !GC_ENABLE_LAZY_SWEEP
6260 gc_prof_sweep_timer_start(objspace);
6262 gc_sweep_rest(objspace);
6263#if !GC_ENABLE_LAZY_SWEEP
6264 gc_prof_sweep_timer_stop(objspace);
6270 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6272 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6276 gc_sweeping_exit(objspace);
6282stack_chunk_alloc(void)
6296 return stack->chunk == NULL;
6302 size_t size = stack->index;
6303 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6306 size += stack->limit;
6307 chunk = chunk->next;
6315 chunk->next = stack->cache;
6316 stack->cache = chunk;
6317 stack->cache_size++;
6325 if (stack->unused_cache_size > (stack->cache_size/2)) {
6326 chunk = stack->cache;
6327 stack->cache = stack->cache->next;
6328 stack->cache_size--;
6331 stack->unused_cache_size = stack->cache_size;
6339 GC_ASSERT(stack->index == stack->limit);
6341 if (stack->cache_size > 0) {
6342 next = stack->cache;
6343 stack->cache = stack->cache->next;
6344 stack->cache_size--;
6345 if (stack->unused_cache_size > stack->cache_size)
6346 stack->unused_cache_size = stack->cache_size;
6349 next = stack_chunk_alloc();
6351 next->next = stack->chunk;
6352 stack->chunk = next;
6361 prev = stack->chunk->next;
6362 GC_ASSERT(stack->index == 0);
6363 add_stack_chunk_cache(stack, stack->chunk);
6364 stack->chunk = prev;
6365 stack->index = stack->limit;
6373 while (chunk != NULL) {
6383 mark_stack_chunk_list_free(stack->chunk);
6389 mark_stack_chunk_list_free(stack->cache);
6390 stack->cache_size = 0;
6391 stack->unused_cache_size = 0;
6419 if (stack->index == stack->limit) {
6420 push_mark_stack_chunk(stack);
6422 stack->chunk->data[stack->index++] = data;
6432 rb_bug("push_mark_stack() called for broken object");
6436 UNEXPECTED_NODE(push_mark_stack);
6440 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6442 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6448 if (is_mark_stack_empty(stack)) {
6451 if (stack->index == 1) {
6452 *data = stack->chunk->data[--stack->index];
6453 pop_mark_stack_chunk(stack);
6456 *data = stack->chunk->data[--stack->index];
6467 stack->index = stack->limit = STACK_CHUNK_SIZE;
6469 for (i=0; i < 4; i++) {
6470 add_stack_chunk_cache(stack, stack_chunk_alloc());
6472 stack->unused_cache_size = stack->cache_size;
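/*
 * Illustrative sketch: the chunked mark stack used above - fixed-size chunks
 * linked together, pushing a fresh chunk when the current one fills.  The
 * structs, sizes, and lack of error handling are hypothetical toys.
 */
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#define CHUNK_CAP_EXAMPLE 4

struct chunk_example {
    struct chunk_example *next;
    uintptr_t data[CHUNK_CAP_EXAMPLE];
};

struct mark_stack_example {
    struct chunk_example *chunk;  /* current (top) chunk */
    size_t index;                 /* next free slot in the current chunk */
};

static void
mark_stack_push_example(struct mark_stack_example *stack, uintptr_t value)
{
    if (stack->chunk == NULL || stack->index == CHUNK_CAP_EXAMPLE) {
        struct chunk_example *next = malloc(sizeof(*next)); /* toy: no NULL check */
        next->next = stack->chunk;
        stack->chunk = next;
        stack->index = 0;
    }
    stack->chunk->data[stack->index++] = value;
}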
6477#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6479#define STACK_START (ec->machine.stack_start)
6480#define STACK_END (ec->machine.stack_end)
6481#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6483#if STACK_GROW_DIRECTION < 0
6484# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6485#elif STACK_GROW_DIRECTION > 0
6486# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6488# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6489 : (size_t)(STACK_END - STACK_START + 1))
6491#if !STACK_GROW_DIRECTION
6492int ruby_stack_grow_direction;
6494ruby_get_stack_grow_direction(volatile VALUE *addr)
6497 SET_MACHINE_STACK_END(&end);
6499 if (end > addr) return ruby_stack_grow_direction = 1;
6500 return ruby_stack_grow_direction = -1;
6509 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6510 return STACK_LENGTH;
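/*
 * Illustrative sketch: detecting machine-stack growth direction at runtime,
 * as ruby_get_stack_grow_direction() does above - compare the address of a
 * local in a callee against one in its caller.  Comparing addresses across
 * frames is formally unspecified in ISO C, but this is the conventional
 * probe; the functions here are hypothetical.
 */
static int
stack_grow_direction_example_inner(volatile char *outer_local)
{
    volatile char inner_local = 0;
    return (&inner_local > outer_local) ? 1 : -1;  /* 1: grows up, -1: grows down */
}

static int
stack_grow_direction_example(void)
{
    volatile char outer_local = 0;
    return stack_grow_direction_example_inner(&outer_local);
}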
6513#define PREVENT_STACK_OVERFLOW 1
6514#ifndef PREVENT_STACK_OVERFLOW
6515#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6516# define PREVENT_STACK_OVERFLOW 1
6518# define PREVENT_STACK_OVERFLOW 0
6521#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6527 size_t length = STACK_LENGTH;
6528 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6530 return length > maximum_length;
6533#define stack_check(ec, water_mark) FALSE
6536#define STACKFRAME_FOR_CALL_CFUNC 2048
6541 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6547 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6567 if (end <= start) return;
6569 each_location(objspace, start, n, cb);
6575 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6579rb_gc_mark_values(long n, const VALUE *values)
6584 for (i=0; i<n; i++) {
6585 gc_mark(objspace, values[i]);
6594 for (i=0; i<n; i++) {
6595 if (is_markable_object(values[i])) {
6596 gc_mark_and_pin(objspace, values[i]);
6602rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6605 gc_mark_stack_values(objspace, n, values);
6609mark_value(st_data_t key, st_data_t value, st_data_t data)
6612 gc_mark(objspace, (VALUE)value);
6617mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6620 gc_mark_and_pin(objspace, (VALUE)value);
6627 if (!tbl || tbl->num_entries == 0) return;
6628 st_foreach(tbl, mark_value, (st_data_t)objspace);
6634 if (!tbl || tbl->num_entries == 0) return;
6635 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6639mark_key(st_data_t key, st_data_t value, st_data_t data)
6642 gc_mark_and_pin(objspace, (VALUE)key);
6650 st_foreach(tbl, mark_key, (st_data_t)objspace);
6654pin_value(st_data_t key, st_data_t value, st_data_t data)
6657 gc_mark_and_pin(objspace, (VALUE)value);
6665 st_foreach(tbl, pin_value, (st_data_t)objspace);
6675mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6679 gc_mark(objspace, (VALUE)key);
6680 gc_mark(objspace, (VALUE)value);
6685pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6689 gc_mark_and_pin(objspace, (VALUE)key);
6690 gc_mark_and_pin(objspace, (VALUE)value);
6695pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6699 gc_mark_and_pin(objspace, (VALUE)key);
6700 gc_mark(objspace, (VALUE)value);
6707 if (rb_hash_compare_by_id_p(hash)) {
6708 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6711 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6714 gc_mark(objspace, RHASH(hash)->ifnone);
6721 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
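/*
 * Illustrative sketch: the foreach-callback marking pattern used above,
 * applied to a hypothetical table type.  Keys of identity hashes must not
 * move, so they are "pinned"; ordinary keys and all values are merely
 * marked and may be relocated by compaction.
 */
#include <stdint.h>

typedef void (*visit_fn_example)(uintptr_t obj, int pin);

static int
mark_entry_example(uintptr_t key, uintptr_t value, visit_fn_example visit, int identity_hash)
{
    visit(key, identity_hash);  /* pin keys that hash by identity */
    visit(value, 0);            /* values can be moved, so only mark them */
    return 0;                   /* continue iteration */
}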
6735 gc_mark(objspace, me->owner);
6736 gc_mark(objspace, me->defined_class);
6739 switch (def->type) {
6740 case VM_METHOD_TYPE_ISEQ:
6742 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6744 if (def->iseq_overload && me->defined_class) {
6747 gc_mark_and_pin(objspace, (VALUE)me);
6750 case VM_METHOD_TYPE_ATTRSET:
6751 case VM_METHOD_TYPE_IVAR:
6752 gc_mark(objspace, def->body.attr.location);
6754 case VM_METHOD_TYPE_BMETHOD:
6755 gc_mark(objspace, def->body.bmethod.proc);
6756 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6758 case VM_METHOD_TYPE_ALIAS:
6759 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6761 case VM_METHOD_TYPE_REFINED:
6762 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6764 case VM_METHOD_TYPE_CFUNC:
6765 case VM_METHOD_TYPE_ZSUPER:
6766 case VM_METHOD_TYPE_MISSING:
6767 case VM_METHOD_TYPE_OPTIMIZED:
6768 case VM_METHOD_TYPE_UNDEF:
6769 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6775static enum rb_id_table_iterator_result
6776mark_method_entry_i(VALUE me, void *data)
6780 gc_mark(objspace, me);
6781 return ID_TABLE_CONTINUE;
6788 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6792static enum rb_id_table_iterator_result
6793mark_const_entry_i(VALUE value, void *data)
6798 gc_mark(objspace, ce->value);
6799 gc_mark(objspace, ce->file);
6800 return ID_TABLE_CONTINUE;
6807 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6810#if STACK_GROW_DIRECTION < 0
6811#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6812#elif STACK_GROW_DIRECTION > 0
6813#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6815#define GET_STACK_BOUNDS(start, end, appendix) \
6816 ((STACK_END < STACK_START) ? \
6817 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6823#if defined(__wasm__)
6826static VALUE *rb_stack_range_tmp[2];
6829rb_mark_locations(void *begin, void *end)
6831 rb_stack_range_tmp[0] = begin;
6832 rb_stack_range_tmp[1] = end;
6835# if defined(__EMSCRIPTEN__)
6840 emscripten_scan_stack(rb_mark_locations);
6841 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6843 emscripten_scan_registers(rb_mark_locations);
6844 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6851 VALUE *stack_start, *stack_end;
6853 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6854 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6856 rb_wasm_scan_locals(rb_mark_locations);
6857 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
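/* mark_current_machine_context: spills the CPU registers into a local buffer
 * via setjmp, then conservatively scans both that buffer and the machine stack
 * for values that may be object references (gc_mark_maybe). */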
6869 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6870 } save_regs_gc_mark;
6871 VALUE *stack_start, *stack_end;
6873 FLUSH_REGISTER_WINDOWS;
6874 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6876 rb_setjmp(save_regs_gc_mark.j);
6882 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6884 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6886 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6894 VALUE *stack_start, *stack_end;
6896 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6897 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
6898 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6904 each_machine_stack_value(ec, gc_mark_maybe);
6912 gc_mark_locations(objspace, stack_start, stack_end, cb);
6914#if defined(__mc68000__)
6915 gc_mark_locations(objspace,
6916 (VALUE*)((char*)stack_start + 2),
6917 (VALUE*)((char*)stack_end - 2), cb);
6936 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6938 if (is_pointer_to_heap(objspace, (void *)obj)) {
6939 void *ptr = asan_unpoison_object_temporary(obj);
6947 gc_mark_and_pin(objspace, obj);
6953 asan_poison_object(obj);
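/* gc_mark_set: sets the mark bit for obj in its page's mark bitmap; returns 0
 * if the object was already marked. */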
6967 ASSERT_vm_locking();
6968 if (RVALUE_MARKED(obj)) return 0;
6969 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6976 struct heap_page *page = GET_HEAP_PAGE(obj);
6977 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6979 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6980 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
6981 MARK_IN_BITMAP(uncollectible_bits, obj);
6982 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6984#if RGENGC_PROFILE > 0
6985 objspace->profile.total_remembered_shady_object_count++;
6986#if RGENGC_PROFILE >= 2
6987 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
7000 const VALUE old_parent = objspace->rgengc.parent_object;
7003 if (RVALUE_WB_UNPROTECTED(obj) || !RVALUE_OLD_P(obj)) {
7004 rgengc_remember(objspace, old_parent);
7008 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
7014#if RGENGC_CHECK_MODE
7015 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7016 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7019 if (is_incremental_marking(objspace)) {
7020 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7023 push_mark_stack(&objspace->mark_stack, obj);
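/* gc_aging: for write-barrier-protected objects, increments the age of young
 * ones and records already-old ones as uncollectible during a full mark; every
 * newly marked object is counted into marked_slots. */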
7029 struct heap_page *page = GET_HEAP_PAGE(obj);
7031 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7032 check_rvalue_consistency(obj);
7034 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7035 if (!RVALUE_OLD_P(obj)) {
7036 gc_report(3, objspace,
"gc_aging: YOUNG: %s\n", obj_info(obj));
7037 RVALUE_AGE_INC(objspace, obj);
7039 else if (is_full_marking(objspace)) {
7040 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7041 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7044 check_rvalue_consistency(obj);
7046 objspace->marked_slots++;
7050static void reachable_objects_from_callback(VALUE obj);
7055 if (LIKELY(during_gc)) {
7056 rgengc_check_relation(objspace, obj);
7057 if (!gc_mark_set(objspace, obj)) return;
7060 if (objspace->rgengc.parent_object) {
7061 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7062 (void *)obj, obj_type_name(obj),
7063 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7066 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7070 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7072 rb_bug("try to mark T_NONE object");
7074 gc_aging(objspace, obj);
7075 gc_grey(objspace, obj);
7078 reachable_objects_from_callback(obj);
7085 GC_ASSERT(is_markable_object(obj));
7086 if (UNLIKELY(objspace->flags.during_compacting)) {
7087 if (LIKELY(during_gc)) {
7088 if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
7089 GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
7090 GET_HEAP_PAGE(obj)->pinned_slots++;
7091 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7100 if (!is_markable_object(obj)) return;
7101 gc_pin(objspace, obj);
7102 gc_mark_ptr(objspace, obj);
7108 if (!is_markable_object(obj)) return;
7109 gc_mark_ptr(objspace, obj);
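/* rb_gc_mark_and_move: during reference updating (compaction) rewrites *ptr to
 * the object's new location via rb_gc_location; otherwise simply marks it. */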
7125rb_gc_mark_and_move(VALUE *ptr)
7130 if (UNLIKELY(objspace->flags.during_reference_updating)) {
7131 GC_ASSERT(objspace->flags.during_compacting);
7132 GC_ASSERT(during_gc);
7134 *ptr = rb_gc_location(*ptr);
7137 gc_mark_ptr(objspace, *ptr);
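/* rb_gc_mark_weak: records *ptr as a weak reference; after marking, entries
 * whose referent was not marked are cleared (see gc_update_weak_references). */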
7142rb_gc_mark_weak(VALUE *ptr)
7146 if (UNLIKELY(!during_gc)) return;
7151 GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
7153 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7155 rb_bug("try to mark T_NONE object");
7161 if (!is_full_marking(objspace) && RVALUE_OLD_P(obj)) {
7162 GC_ASSERT(RVALUE_MARKED(obj));
7163 GC_ASSERT(!objspace->flags.during_compacting);
7168 rgengc_check_relation(objspace, obj);
7170 rb_darray_append_without_gc(&objspace->weak_references, ptr);
7172 objspace->profile.weak_references_count++;
7176rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
7182 if (!is_incremental_marking(objspace)) return;
7185 if (!RVALUE_MARKED(parent_obj)) return;
7188 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
7189 if (*ptr_ptr == ptr) {
7201rb_objspace_marked_object_p(VALUE obj)
7203 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7209 if (RVALUE_OLD_P(obj)) {
7210 objspace->rgengc.parent_object = obj;
7213 objspace->rgengc.parent_object = Qfalse;
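/* gc_mark_imemo: marks the references held by each imemo type (env, cref,
 * svar, throw_data, ifunc, memo, method entries, iseq, AST, callcache, ...). */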
7220 switch (imemo_type(obj)) {
7225 if (LIKELY(env->ep)) {
7227 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7228 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7229 rb_gc_mark_values((long)env->env_size, env->env);
7230 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7231 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7232 gc_mark(objspace, (VALUE)env->iseq);
7237 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7238 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7239 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7242 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7243 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7244 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7245 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7247 case imemo_throw_data:
7248 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7251 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7254 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7255 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7256 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7259 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7262 rb_iseq_mark_and_move((rb_iseq_t *)obj, false);
7268 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7269 } while ((m = m->next) != NULL);
7273 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7275 case imemo_parser_strterm:
7277 case imemo_callinfo:
7279 case imemo_callcache:
7301 if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
7302 gc_mark(objspace, (VALUE)cc->cme_);
7306 case imemo_constcache:
7309 gc_mark(objspace, ice->value);
7312#if VM_CHECK_MODE > 0
7314 VM_UNREACHABLE(gc_mark_imemo);
7322 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
7330 register RVALUE *any = RANY(obj);
7331 gc_mark_set_parent(objspace, obj);
7334 rb_mark_generic_ivar(obj);
7347 rb_bug("rb_gc_mark() called for broken object");
7351 UNEXPECTED_NODE(rb_gc_mark);
7355 gc_mark_imemo(objspace, obj);
7362 gc_mark(objspace, any->as.basic.klass);
7367 gc_mark(objspace, RCLASS_ATTACHED_OBJECT(obj));
7375 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7376 mark_cvc_tbl(objspace, obj);
7377 cc_table_mark(objspace, obj);
7378 if (rb_shape_obj_too_complex(obj)) {
7379 mark_tbl_no_pin(objspace, (st_table *)RCLASS_IVPTR(obj));
7382 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7383 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7386 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7388 gc_mark(objspace, RCLASS_EXT(obj)->classpath);
7392 if (RICLASS_OWNS_M_TBL_P(obj)) {
7393 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7399 if (RCLASS_INCLUDER(obj)) {
7400 gc_mark(objspace, RCLASS_INCLUDER(obj));
7402 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7403 cc_table_mark(objspace, obj);
7407 if (ARY_SHARED_P(obj)) {
7408 VALUE root = ARY_SHARED_ROOT(obj);
7409 gc_mark(objspace, root);
7414 for (i=0; i < len; i++) {
7415 gc_mark(objspace, ptr[i]);
7421 mark_hash(objspace, obj);
7425 if (STR_SHARED_P(obj)) {
7444 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
7445 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
7447 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
7448 rb_gc_mark_movable(*(VALUE *)((char *)ptr + offset));
7455 if (mark_func) (*mark_func)(ptr);
7463 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7464 if (rb_shape_obj_too_complex(obj)) {
7465 mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
7470 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7471 for (i = 0; i < len; i++) {
7472 gc_mark(objspace, ptr[i]);
7479 attr_index_t num_of_ivs = shape->next_iv_index;
7480 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7481 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7488 if (any->as.file.fptr) {
7489 gc_mark(objspace, any->as.file.fptr->self);
7490 gc_mark(objspace, any->as.file.fptr->pathv);
7501 gc_mark(objspace, any->as.regexp.src);
7505 gc_mark(objspace, any->as.match.regexp);
7506 if (any->as.match.str) {
7507 gc_mark(objspace, any->as.match.str);
7512 gc_mark(objspace, any->as.rational.num);
7513 gc_mark(objspace, any->as.rational.den);
7517 gc_mark(objspace, any->as.complex.real);
7518 gc_mark(objspace, any->as.complex.imag);
7524 const long len = RSTRUCT_LEN(obj);
7525 const VALUE *const ptr = RSTRUCT_CONST_PTR(obj);
7527 for (i=0; i<len; i++) {
7528 gc_mark(objspace, ptr[i]);
7535 rb_gcdebug_print_obj_condition((VALUE)obj);
7540 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7542 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7551gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
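/* Pops objects from the mark stack and marks their children; in incremental
 * mode it clears each object's marking bit and stops once roughly `count`
 * slots have been processed, so marking can resume later. */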
7555 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7556 size_t popped_count = 0;
7558 while (pop_mark_stack(mstack, &obj)) {
7559 if (UNDEF_P(obj)) continue;
7561 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7562 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7564 gc_mark_children(objspace, obj);
7567 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7568 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7570 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7573 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7582 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7584 if (is_mark_stack_empty(mstack)) {
7585 shrink_stack_chunk_cache(mstack);
7594gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7596 return gc_mark_stacked_objects(objspace, TRUE, count);
7602 return gc_mark_stacked_objects(objspace, FALSE, 0);
7606#define MAX_TICKS 0x100
7607static tick_t mark_ticks[MAX_TICKS];
7608static const char *mark_ticks_categories[MAX_TICKS];
7611show_mark_ticks(void)
7614 fprintf(stderr, "mark ticks result:\n");
7615 for (i=0; i<MAX_TICKS; i++) {
7616 const char *category = mark_ticks_categories[i];
7618 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7629gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7633 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7636 tick_t start_tick = tick();
7638 const char *prev_category = 0;
7640 if (mark_ticks_categories[0] == 0) {
7641 atexit(show_mark_ticks);
7645 if (categoryp) *categoryp = "xxx";
7647 objspace->rgengc.parent_object = Qfalse;
7650#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7651 if (prev_category) { \
7652 tick_t t = tick(); \
7653 mark_ticks[tick_count] = t - start_tick; \
7654 mark_ticks_categories[tick_count] = prev_category; \
7657 prev_category = category; \
7658 start_tick = tick(); \
7661#define MARK_CHECKPOINT_PRINT_TICK(category)
7664#define MARK_CHECKPOINT(category) do { \
7665 if (categoryp) *categoryp = category; \
7666 MARK_CHECKPOINT_PRINT_TICK(category); \
7669 MARK_CHECKPOINT("vm");
7672 if (vm->self) gc_mark(objspace, vm->self);
7674 MARK_CHECKPOINT("finalizers");
7675 mark_finalizer_tbl(objspace, finalizer_table);
7677 MARK_CHECKPOINT("machine_context");
7678 mark_current_machine_context(objspace, ec);
7681 MARK_CHECKPOINT("global_list");
7682 for (list = global_list; list; list = list->next) {
7683 gc_mark_maybe(objspace, *list->varptr);
7686 MARK_CHECKPOINT("end_proc");
7689 MARK_CHECKPOINT("global_tbl");
7690 rb_gc_mark_global_tbl();
7692 MARK_CHECKPOINT("object_id");
7693 rb_gc_mark(objspace->next_object_id);
7694 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl);
7696 if (stress_to_class) rb_gc_mark(stress_to_class);
7698 MARK_CHECKPOINT("finish");
7699#undef MARK_CHECKPOINT
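/* The RGENGC_CHECK_MODE >= 4 machinery below records, for every object, the
 * set of roots and objects it is reachable from (the allrefs table), so that
 * marking bugs such as write-barrier misses can be reported after a GC. */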
7702#if RGENGC_CHECK_MODE >= 4
7704#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7705#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7706#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7714static struct reflist *
7715reflist_create(VALUE obj)
7717 struct reflist *refs = xmalloc(sizeof(struct reflist));
7720 refs->list[0] = obj;
7726reflist_destruct(struct reflist *refs)
7733reflist_add(struct reflist *refs, VALUE obj)
7735 if (refs->pos == refs->size) {
7737 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7740 refs->list[refs->pos++] = obj;
7744reflist_dump(struct reflist *refs)
7747 for (i=0; i<refs->pos; i++) {
7748 VALUE obj = refs->list[i];
7749 if (IS_ROOTSIG(obj)) {
7750 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7753 fprintf(stderr, "<%s>", obj_info(obj));
7755 if (i+1 < refs->pos) fprintf(stderr, ", ");
7760reflist_referred_from_machine_context(struct reflist *refs)
7763 for (i=0; i<refs->pos; i++) {
7764 VALUE obj = refs->list[i];
7765 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7780 const char *category;
7786allrefs_add(struct allrefs *data, VALUE obj)
7788 struct reflist *refs;
7791 if (st_lookup(data->references, obj, &r)) {
7792 refs = (struct reflist *)r;
7793 reflist_add(refs, data->root_obj);
7797 refs = reflist_create(data->root_obj);
7798 st_insert(data->references, obj, (st_data_t)refs);
7804allrefs_i(VALUE obj, void *ptr)
7806 struct allrefs *data = (struct allrefs *)ptr;
7808 if (allrefs_add(data, obj)) {
7809 push_mark_stack(&data->mark_stack, obj);
7814allrefs_roots_i(VALUE obj, void *ptr)
7816 struct allrefs *data = (struct allrefs *)ptr;
7817 if (strlen(data->category) == 0) rb_bug("!!!");
7818 data->root_obj = MAKE_ROOTSIG(data->category);
7820 if (allrefs_add(data, obj)) {
7821 push_mark_stack(&data->mark_stack, obj);
7824#define PUSH_MARK_FUNC_DATA(v) do { \
7825 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7826 GET_RACTOR()->mfd = (v);
7828#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7833 struct allrefs data;
7834 struct gc_mark_func_data_struct mfd;
7836 int prev_dont_gc = dont_gc_val();
7839 data.objspace = objspace;
7840 data.references = st_init_numtable();
7841 init_mark_stack(&data.mark_stack);
7843 mfd.mark_func = allrefs_roots_i;
7847 PUSH_MARK_FUNC_DATA(&mfd);
7848 GET_RACTOR()->mfd = &mfd;
7849 gc_mark_roots(objspace, &data.category);
7850 POP_MARK_FUNC_DATA();
7853 while (pop_mark_stack(&data.mark_stack, &obj)) {
7854 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7856 free_stack_chunks(&data.mark_stack);
7858 dont_gc_set(prev_dont_gc);
7859 return data.references;
7863objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7865 struct reflist *refs = (struct reflist *)value;
7866 reflist_destruct(refs);
7871objspace_allrefs_destruct(struct st_table *refs)
7873 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7874 st_free_table(refs);
7877#if RGENGC_CHECK_MODE >= 5
7879allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7882 struct reflist *refs = (struct reflist *)v;
7883 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7885 fprintf(stderr, "\n");
7892 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7893 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7894 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7899gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7902 struct reflist *refs = (struct reflist *)v;
7906 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7907 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7908 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7911 if (reflist_referred_from_machine_context(refs)) {
7912 fprintf(stderr, " (marked from machine stack).\n");
7916 objspace->rgengc.error_count++;
7917 fprintf(stderr, "\n");
7924gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7926 size_t saved_malloc_increase = objspace->malloc_params.increase;
7927#if RGENGC_ESTIMATE_OLDMALLOC
7928 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7930 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7932 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7935 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7938 if (objspace->rgengc.error_count > 0) {
7939#if RGENGC_CHECK_MODE >= 5
7940 allrefs_dump(objspace);
7942 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7945 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7946 objspace->rgengc.allrefs_table = 0;
7948 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7949 objspace->malloc_params.increase = saved_malloc_increase;
7950#if RGENGC_ESTIMATE_OLDMALLOC
7951 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7959 size_t live_object_count;
7960 size_t zombie_object_count;
7963 size_t old_object_count;
7964 size_t remembered_shady_count;
7968check_generation_i(const VALUE child, void *ptr)
7971 const VALUE parent = data->parent;
7973 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7975 if (!RVALUE_OLD_P(child)) {
7976 if (!RVALUE_REMEMBERED(parent) &&
7977 !RVALUE_REMEMBERED(child) &&
7978 !RVALUE_UNCOLLECTIBLE(child)) {
7979 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7986check_color_i(const VALUE child, void *ptr)
7989 const VALUE parent = data->parent;
7991 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7992 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7993 obj_info(parent), obj_info(child));
7999check_children_i(const VALUE child, void *ptr)
8002 if (check_rvalue_consistency_force(child, FALSE) != 0) {
8003 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
8004 obj_info(child), obj_info(data->parent));
8005 rb_print_backtrace(stderr);
8012verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
8018 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
8019 void *poisoned = asan_unpoison_object_temporary(obj);
8021 if (is_live_object(objspace, obj)) {
8023 data->live_object_count++;
8028 if (!gc_object_moved_p(objspace, obj)) {
8030 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
8034 if (RVALUE_OLD_P(obj)) data->old_object_count++;
8035 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
8037 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
8040 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
8043 if (is_incremental_marking(objspace)) {
8044 if (RVALUE_BLACK_P(obj)) {
8047 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
8054 data->zombie_object_count++;
8059 asan_poison_object(obj);
8069 unsigned int has_remembered_shady = FALSE;
8070 unsigned int has_remembered_old = FALSE;
8071 int remembered_old_objects = 0;
8072 int free_objects = 0;
8073 int zombie_objects = 0;
8075 short slot_size = page->slot_size;
8076 uintptr_t start = (uintptr_t)page->start;
8077 uintptr_t end = start + page->total_slots * slot_size;
8079 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8081 void *poisoned = asan_unpoison_object_temporary(val);
8086 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
8087 has_remembered_shady = TRUE;
8089 if (RVALUE_PAGE_MARKING(page, val)) {
8090 has_remembered_old = TRUE;
8091 remembered_old_objects++;
8096 asan_poison_object(val);
8100 if (!is_incremental_marking(objspace) &&
8101 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
8103 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8105 if (RVALUE_PAGE_MARKING(page, val)) {
8106 fprintf(stderr, "marking -> %s\n", obj_info(val));
8109 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
8110 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
8113 if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
8114 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
8115 (void *)page, obj ? obj_info(obj) : "");
8120 if (page->free_slots != free_objects) {
8121 rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
8124 if (page->final_slots != zombie_objects) {
8125 rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
8128 return remembered_old_objects;
8132gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
8134 int remembered_old_objects = 0;
8137 ccan_list_for_each(head, page, page_node) {
8138 asan_unlock_freelist(page);
8139 RVALUE *p = page->freelist;
8143 asan_unpoison_object(vp, false);
8145 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8147 p = p->as.free.next;
8148 asan_poison_object(prev);
8150 asan_lock_freelist(page);
8152 if (page->flags.has_remembered_objects == FALSE) {
8153 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8157 return remembered_old_objects;
8163 int remembered_old_objects = 0;
8164 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8165 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8166 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8168 return remembered_old_objects;
8182gc_verify_internal_consistency_m(VALUE dummy)
8193 data.objspace = objspace;
8194 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8197 for (size_t i = 0; i < heap_allocated_pages; i++) {
8198 struct heap_page *page = heap_pages_sorted[i];
8199 short slot_size = page->slot_size;
8201 uintptr_t start = (uintptr_t)page->start;
8202 uintptr_t end = start + page->total_slots * slot_size;
8204 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8207 if (data.err_count != 0) {
8208#if RGENGC_CHECK_MODE >= 5
8209 objspace->rgengc.error_count = data.err_count;
8210 gc_marks_check(objspace, NULL, NULL);
8211 allrefs_dump(objspace);
8213 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8217 gc_verify_heap_pages(objspace);
8221 if (!is_lazy_sweeping(objspace) &&
8223 ruby_single_main_ractor != NULL) {
8224 if (objspace_live_slots(objspace) != data.live_object_count) {
8225 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
8226 heap_pages_final_slots, total_freed_objects(objspace));
8227 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8228 objspace_live_slots(objspace), data.live_object_count);
8232 if (!is_marking(objspace)) {
8233 if (objspace->rgengc.old_objects != data.old_object_count) {
8234 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8235 objspace->rgengc.old_objects, data.old_object_count);
8237 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8238 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8239 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8244 size_t list_count = 0;
8247 VALUE z = heap_pages_deferred_final;
8250 z = RZOMBIE(z)->next;
8254 if (heap_pages_final_slots != data.zombie_object_count ||
8255 heap_pages_final_slots != list_count) {
8257 rb_bug("inconsistent finalizing object count:\n"
8258 " expect %"PRIuSIZE"\n"
8259 " but %"PRIuSIZE" zombies\n"
8260 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8261 heap_pages_final_slots,
8262 data.zombie_object_count,
8267 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8277 unsigned int prev_during_gc = during_gc;
8280 gc_verify_internal_consistency_(objspace);
8282 during_gc = prev_during_gc;
8288rb_gc_verify_internal_consistency(void)
8294heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8296 if (heap->pooled_pages) {
8297 if (heap->free_pages) {
8298 struct heap_page *free_pages_tail = heap->free_pages;
8299 while (free_pages_tail->free_next) {
8300 free_pages_tail = free_pages_tail->free_next;
8302 free_pages_tail->free_next = heap->pooled_pages;
8305 heap->free_pages = heap->pooled_pages;
8308 heap->pooled_pages = NULL;
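/* gc_marks_start: switches into marking mode. A full mark resets the old and
 * uncollectible counters and clears the mark/rememberset bitmaps; a minor mark
 * seeds marked_slots from the old generation and re-marks the remembered set
 * before the roots are marked. */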
8318 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8319 gc_mode_transition(objspace, gc_mode_marking);
8322 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8323 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8325 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8326 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
8327 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8328 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8329 objspace->flags.during_minor_gc = FALSE;
8330 if (ruby_enable_autocompact) {
8331 objspace->flags.during_compacting |= TRUE;
8333 objspace->profile.major_gc_count++;
8334 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8335 objspace->rgengc.old_objects = 0;
8336 objspace->rgengc.last_major_gc = objspace->profile.count;
8337 objspace->marked_slots = 0;
8339 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8341 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8342 rgengc_mark_and_rememberset_clear(objspace, heap);
8343 heap_move_pooled_pages_to_free_pages(heap);
8345 if (objspace->flags.during_compacting) {
8348 ccan_list_for_each(&heap->pages, page, page_node) {
8349 page->pinned_slots = 0;
8355 objspace->flags.during_minor_gc = TRUE;
8356 objspace->marked_slots =
8357 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
8358 objspace->profile.minor_gc_count++;
8360 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8361 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8365 gc_mark_roots(objspace, NULL);
8367 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8368 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8372gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8377 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8378 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8379 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8380 gc_mark_children(objspace, (VALUE)p);
8382 p += BASE_SLOT_SIZE;
8393 ccan_list_for_each(&heap->pages, page, page_node) {
8394 bits_t *mark_bits = page->mark_bits;
8395 bits_t *wbun_bits = page->wb_unprotected_bits;
8396 uintptr_t p = page->start;
8399 bits_t bits = mark_bits[0] & wbun_bits[0];
8400 bits >>= NUM_IN_PAGE(p);
8401 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8402 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8404 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8405 bits_t bits = mark_bits[j] & wbun_bits[j];
8407 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8408 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8412 gc_mark_stacked_objects_all(objspace);
8418 size_t retained_weak_references_count = 0;
8420 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
8421 if (!*ptr_ptr) continue;
8423 VALUE obj = **ptr_ptr;
8427 if (!RVALUE_MARKED(obj)) {
8431 retained_weak_references_count++;
8435 objspace->profile.retained_weak_references_count = retained_weak_references_count;
8437 rb_darray_clear(objspace->weak_references);
8438 rb_darray_resize_capa_without_gc(&objspace->weak_references, retained_weak_references_count);
8445 if (is_incremental_marking(objspace)) {
8446 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8447 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8448 mark_stack_size(&objspace->mark_stack));
8451 gc_mark_roots(objspace, 0);
8452 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8454#if RGENGC_CHECK_MODE >= 2
8455 if (gc_verify_heap_pages(objspace) != 0) {
8456 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8460 objspace->flags.during_incremental_marking = FALSE;
8462 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8463 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8467 gc_update_weak_references(objspace);
8469#if RGENGC_CHECK_MODE >= 2
8470 gc_verify_internal_consistency(objspace);
8473#if RGENGC_CHECK_MODE >= 4
8475 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8481 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8482 size_t sweep_slots = total_slots - objspace->marked_slots;
8483 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8484 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8485 int full_marking = is_full_marking(objspace);
8486 const int r_cnt = GET_VM()->ractor.cnt;
8487 const int r_mul = r_cnt > 8 ? 8 : r_cnt;
8489 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8492 size_t total_init_slots = 0;
8493 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8494 total_init_slots += gc_params.size_pool_init_slots[i] * r_mul;
8497 if (max_free_slots < total_init_slots) {
8498 max_free_slots = total_init_slots;
8501 if (sweep_slots > max_free_slots) {
8502 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8505 heap_pages_freeable_pages = 0;
8509 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8510 min_free_slots = gc_params.heap_free_slots * r_mul;
8513 if (sweep_slots < min_free_slots) {
8514 if (!full_marking) {
8515 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8516 full_marking = TRUE;
8521 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
8522 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8529 const double r = gc_params.oldobject_limit_factor;
8530 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
8531 (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
8532 (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
8534 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8537 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8538 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8540 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8541 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8543 if (RGENGC_FORCE_MAJOR_GC) {
8544 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8547 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8548 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8549 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8550 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8551 objspace->rgengc.need_major_gc ? "major" : "minor");
8554 rb_ractor_finish_marking();
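/* Compaction helpers: each heap keeps a sweeping page cursor and a compact
 * cursor moving towards each other; once they meet, compaction of that heap is
 * finished. Moved objects go to a destination pool chosen by their size. */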
8560gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8562 return heap->sweeping_page == heap->compact_cursor;
8573 obj_size = rb_ary_size_as_embedded(src);
8577 if (rb_shape_obj_too_complex(src)) {
8578 return &size_pools[0];
8586 obj_size = rb_str_size_as_embedded(src);
8590 obj_size = sizeof(struct RHash) + (RHASH_ST_TABLE_P(src) ? sizeof(st_table) : sizeof(ar_table));
8597 if (rb_gc_size_allocatable_p(obj_size)){
8598 idx = size_pool_idx_for_size(obj_size);
8600 return &size_pools[idx];
8607 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8609 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8610 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8614 if (gc_compact_heap_cursors_met_p(dheap)) {
8615 return dheap != heap;
8619 orig_shape = rb_shape_get_shape(src);
8620 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8621 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8622 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8625 dest_pool = size_pool;
8631 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8633 .page = dheap->sweeping_page,
8642 lock_page_body(objspace, GET_PAGE_BODY(src));
8643 gc_sweep_page(objspace, dheap, &ctx);
8644 unlock_page_body(objspace, GET_PAGE_BODY(src));
8646 if (dheap->sweeping_page->free_slots > 0) {
8647 heap_add_freepage(dheap, dheap->sweeping_page);
8650 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8651 if (gc_compact_heap_cursors_met_p(dheap)) {
8652 return dheap != heap;
8658 VALUE dest = rb_gc_location(src);
8659 rb_shape_set_shape(dest, new_shape);
8661 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8670 short slot_size = page->slot_size;
8671 short slot_bits = slot_size / BASE_SLOT_SIZE;
8672 GC_ASSERT(slot_bits > 0);
8676 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8679 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8681 if (gc_is_moveable_obj(objspace, vp)) {
8682 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8689 bitset >>= slot_bits;
8699 GC_ASSERT(page == heap->compact_cursor);
8701 bits_t *mark_bits, *pin_bits;
8703 uintptr_t p = page->start;
8705 mark_bits = page->mark_bits;
8706 pin_bits = page->pinned_bits;
8709 bitset = (mark_bits[0] & ~pin_bits[0]);
8710 bitset >>= NUM_IN_PAGE(p);
8712 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8715 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8717 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8718 bitset = (mark_bits[j] & ~pin_bits[j]);
8720 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8723 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8732 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8734 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8736 if (heap->total_pages > 0 &&
8737 !gc_compact_heap_cursors_met_p(heap)) {
8748 gc_compact_start(objspace);
8749#if RGENGC_CHECK_MODE >= 2
8750 gc_verify_internal_consistency(objspace);
8753 while (!gc_compact_all_compacted_p(objspace)) {
8754 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8756 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8758 if (gc_compact_heap_cursors_met_p(heap)) {
8762 struct heap_page *start_page = heap->compact_cursor;
8764 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8765 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8772 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8773 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8777 gc_compact_finish(objspace);
8779#if RGENGC_CHECK_MODE >= 2
8780 gc_verify_internal_consistency(objspace);
8787 gc_report(1, objspace, "gc_marks_rest\n");
8789 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8790 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8793 if (is_incremental_marking(objspace)) {
8794 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8797 gc_mark_stacked_objects_all(objspace);
8800 gc_marks_finish(objspace);
8806 bool marking_finished = false;
8808 GC_ASSERT(is_marking(objspace));
8809 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8810 gc_marks_finish(objspace);
8812 marking_finished = true;
8815 return marking_finished;
8821 GC_ASSERT(dont_gc_val() == FALSE);
8822 bool marking_finished = true;
8824 gc_marking_enter(objspace);
8826 if (heap->free_pages) {
8827 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8829 marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
8832 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8833 mark_stack_size(&objspace->mark_stack));
8834 size_pool->force_incremental_marking_finish_count++;
8835 gc_marks_rest(objspace);
8838 gc_marking_exit(objspace);
8840 return marking_finished;
8846 gc_prof_mark_timer_start(objspace);
8847 gc_marking_enter(objspace);
8849 bool marking_finished = false;
8853 gc_marks_start(objspace, full_mark);
8854 if (!is_incremental_marking(objspace)) {
8855 gc_marks_rest(objspace);
8856 marking_finished = true;
8859#if RGENGC_PROFILE > 0
8860 if (gc_prof_record(objspace)) {
8862 record->old_objects = objspace->rgengc.old_objects;
8866 gc_marking_exit(objspace);
8867 gc_prof_mark_timer_stop(objspace);
8869 return marking_finished;
8875gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8877 if (level <= RGENGC_DEBUG) {
8881 const char *status = " ";
8884 status = is_full_marking(objspace) ? "+" : "-";
8887 if (is_lazy_sweeping(objspace)) {
8890 if (is_incremental_marking(objspace)) {
8895 va_start(args, fmt);
8896 vsnprintf(buf, 1024, fmt, args);
8899 fprintf(out, "%s|", status);
8909 struct heap_page *page = GET_HEAP_PAGE(obj);
8910 bits_t *bits = &page->remembered_bits[0];
8912 if (MARKED_IN_BITMAP(bits, obj)) {
8916 page->flags.has_remembered_objects = TRUE;
8917 MARK_IN_BITMAP(bits, obj);
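/* Remembered set: per-page bitmaps record old objects that may point to young
 * ones. rgengc_remember sets the bit for an object, and rgengc_rememberset_mark
 * re-marks the children of remembered (and uncollectible shady) objects on a
 * minor GC. */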
8928 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8929 RVALUE_REMEMBERED(obj) ? "was already remembered" : "is remembered now");
8931 check_rvalue_consistency(obj);
8933 if (RGENGC_CHECK_MODE) {
8934 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8937#if RGENGC_PROFILE > 0
8938 if (!RVALUE_REMEMBERED(obj)) {
8939 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8940 objspace->profile.total_remembered_normal_object_count++;
8941#if RGENGC_PROFILE >= 2
8942 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8948 return rgengc_remembersetbits_set(objspace, obj);
8951#ifndef PROFILE_REMEMBERSET_MARK
8952#define PROFILE_REMEMBERSET_MARK 0
8956rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8962 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8963 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8964 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8966 gc_mark_children(objspace, obj);
8968 p += BASE_SLOT_SIZE;
8979#if PROFILE_REMEMBERSET_MARK
8980 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8982 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8984 ccan_list_for_each(&heap->pages, page, page_node) {
8985 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
8986 uintptr_t p = page->start;
8987 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8988 bits_t *remembered_bits = page->remembered_bits;
8989 bits_t *uncollectible_bits = page->uncollectible_bits;
8990 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8991#if PROFILE_REMEMBERSET_MARK
8992 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
8993 else if (page->flags.has_remembered_objects) has_old++;
8994 else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
8996 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8997 bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8998 remembered_bits[j] = 0;
9000 page->flags.has_remembered_objects = FALSE;
9003 bitset >>= NUM_IN_PAGE(p);
9004 rgengc_rememberset_mark_plane(objspace, p, bitset);
9005 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
9007 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
9009 rgengc_rememberset_mark_plane(objspace, p, bitset);
9010 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
9013#if PROFILE_REMEMBERSET_MARK
9020#if PROFILE_REMEMBERSET_MARK
9021 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
9023 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
9031 ccan_list_for_each(&heap->pages, page, page_node) {
9032 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9033 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9034 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9035 memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9036 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9037 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
9038 page->flags.has_remembered_objects = FALSE;
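/* gc_writebarrier_generational: when an old object `a` is made to reference a
 * young object `b`, remember `a` so the next minor GC re-scans it; the
 * incremental variant below instead greys `b` when `a` is already marked black. */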
9049 if (RGENGC_CHECK_MODE) {
9050 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
9051 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
9052 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
9056 if (!RVALUE_REMEMBERED(a)) {
9057 RB_VM_LOCK_ENTER_NO_BARRIER();
9059 rgengc_remember(objspace, a);
9061 RB_VM_LOCK_LEAVE_NO_BARRIER();
9062 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
9065 check_rvalue_consistency(a);
9066 check_rvalue_consistency(b);
9072 gc_mark_set_parent(objspace, parent);
9073 rgengc_check_relation(objspace, obj);
9074 if (gc_mark_set(objspace, obj) == FALSE) return;
9075 gc_aging(objspace, obj);
9076 gc_grey(objspace, obj);
9084 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
9086 if (RVALUE_BLACK_P(a)) {
9087 if (RVALUE_WHITE_P(b)) {
9088 if (!RVALUE_WB_UNPROTECTED(a)) {
9089 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
9090 gc_mark_from(objspace, b, a);
9093 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
9094 rgengc_remember(objspace, a);
9097 if (UNLIKELY(objspace->flags.during_compacting)) {
9098 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9108 if (RGENGC_CHECK_MODE) {
9109 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
9110 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
9114 if (!is_incremental_marking(objspace)) {
9115 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9119 gc_writebarrier_generational(a, b, objspace);
9125 RB_VM_LOCK_ENTER_NO_BARRIER();
9127 if (is_incremental_marking(objspace)) {
9128 gc_writebarrier_incremental(a, b, objspace);
9134 RB_VM_LOCK_LEAVE_NO_BARRIER();
9136 if (retry) goto retry;
9144 if (RVALUE_WB_UNPROTECTED(obj)) {
9150 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9151 RVALUE_REMEMBERED(obj) ? " (already remembered)" : "");
9153 RB_VM_LOCK_ENTER_NO_BARRIER();
9155 if (RVALUE_OLD_P(obj)) {
9156 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9157 RVALUE_DEMOTE(objspace, obj);
9158 gc_mark_set(objspace, obj);
9159 gc_remember_unprotected(objspace, obj);
9162 objspace->profile.total_shade_operation_count++;
9163#if RGENGC_PROFILE >= 2
9164 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9169 RVALUE_AGE_RESET(obj);
9172 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9173 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9175 RB_VM_LOCK_LEAVE_NO_BARRIER();
9183rb_gc_writebarrier_remember(VALUE obj)
9187 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9189 if (is_incremental_marking(objspace)) {
9190 if (RVALUE_BLACK_P(obj)) {
9191 gc_grey(objspace, obj);
9195 if (RVALUE_OLD_P(obj)) {
9196 rgengc_remember(objspace, obj);
9202rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9206 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9207 if (!RVALUE_OLD_P(dest)) {
9208 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9209 RVALUE_AGE_RESET(dest);
9212 RVALUE_DEMOTE(objspace, dest);
9216 check_rvalue_consistency(dest);
9222rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9224 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9228rb_obj_rgengc_promoted_p(VALUE obj)
9234rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9237 static ID ID_marked;
9238 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9241#define I(s) ID_##s = rb_intern(#s);
9251 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9252 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9253 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9254 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9255 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9256 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9265 newobj_cache->incremental_mark_step_allocated_slots = 0;
9267 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9270 struct heap_page *page = cache->using_page;
9271 RVALUE *freelist = cache->freelist;
9272 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9274 heap_page_freelist_append(page, freelist);
9276 cache->using_page = NULL;
9277 cache->freelist = NULL;
9287#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9288#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9294 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9299 VALUE ary_ary = GET_VM()->mark_object_ary;
9300 VALUE ary = rb_ary_last(0, 0, ary_ary);
9303 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9304 rb_ary_push(ary_ary, ary);
9307 rb_ary_push(ary, obj);
9321 tmp->next = global_list;
9331 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
9333 rb_print_backtrace(stderr);
9341 struct gc_list *tmp = global_list;
9343 if (tmp->varptr == addr) {
9344 global_list = tmp->next;
9349 if (tmp->next->varptr == addr) {
9350 struct gc_list *t = tmp->next;
9352 tmp->next = tmp->next->next;
9363 rb_gc_register_address(var);
9370 gc_stress_no_immediate_sweep,
9371 gc_stress_full_mark_after_malloc,
9375#define gc_stress_full_mark_after_malloc_p() \
9376 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9381 if (!heap->free_pages) {
9382 if (!heap_increment(objspace, size_pool, heap)) {
9383 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9384 heap_increment(objspace, size_pool, heap);
9392 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9393 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9395 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9405gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9407 gc_prof_set_malloc_info(objspace);
9409 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9410 size_t old_limit = malloc_limit;
9412 if (inc > malloc_limit) {
9413 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9414 if (malloc_limit > gc_params.malloc_limit_max) {
9415 malloc_limit = gc_params.malloc_limit_max;
9419 malloc_limit = (size_t)(malloc_limit * 0.98);
9420 if (malloc_limit < gc_params.malloc_limit_min) {
9421 malloc_limit = gc_params.malloc_limit_min;
9426 if (old_limit != malloc_limit) {
9427 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9428 rb_gc_count(), old_limit, malloc_limit);
9431 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9432 rb_gc_count(), malloc_limit);
9438#if RGENGC_ESTIMATE_OLDMALLOC
9440 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9441 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9442 objspace->rgengc.oldmalloc_increase_limit =
9443 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9445 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9446 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9450 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9452 objspace->rgengc.need_major_gc,
9453 objspace->rgengc.oldmalloc_increase,
9454 objspace->rgengc.oldmalloc_increase_limit,
9455 gc_params.oldmalloc_limit_max);
9459 objspace->rgengc.oldmalloc_increase = 0;
9461 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9462 objspace->rgengc.oldmalloc_increase_limit =
9463 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9464 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9465 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
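/* garbage_collect/gc_start: entry point for a GC cycle. gc_start decides
 * between a minor and a major (full) mark from the recorded need_major_gc
 * flags and GC stress settings, sets the compaction/incremental/immediate-sweep
 * flags, and then starts marking. */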
9473garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9479#if GC_PROFILE_MORE_DETAIL
9480 objspace->profile.prepare_time = getrusage_time();
9485#if GC_PROFILE_MORE_DETAIL
9486 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9489 ret = gc_start(objspace, reason);
9499 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9502 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9504 if (!heap_allocated_pages) return FALSE;
9505 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;
9507 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9508 GC_ASSERT(!is_lazy_sweeping(objspace));
9509 GC_ASSERT(!is_incremental_marking(objspace));
9511 unsigned int lock_lev;
9512 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9514#if RGENGC_CHECK_MODE >= 2
9515 gc_verify_internal_consistency(objspace);
9518 if (ruby_gc_stressful) {
9519 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9521 if ((flag & (1<<gc_stress_no_major)) == 0) {
9522 do_full_mark = TRUE;
9525 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9528 if (objspace->rgengc.need_major_gc) {
9529 reason |= objspace->rgengc.need_major_gc;
9530 do_full_mark = TRUE;
9532 else if (RGENGC_FORCE_MAJOR_GC) {
9533 reason = GPR_FLAG_MAJOR_BY_FORCE;
9534 do_full_mark = TRUE;
9537 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9539 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9540 reason |= GPR_FLAG_MAJOR_BY_FORCE;
9543 if (objspace->flags.dont_incremental ||
9544 reason & GPR_FLAG_IMMEDIATE_MARK ||
9545 ruby_gc_stressful) {
9546 objspace->flags.during_incremental_marking = FALSE;
9549 objspace->flags.during_incremental_marking = do_full_mark;
9553 if (do_full_mark && ruby_enable_autocompact) {
9554 objspace->flags.during_compacting = TRUE;
9555#if RGENGC_CHECK_MODE
9556 objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
9560 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9563 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9564 objspace->flags.immediate_sweep = TRUE;
9567 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9569 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9571 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9573#if USE_DEBUG_COUNTER
9574 RB_DEBUG_COUNTER_INC(gc_count);
9576 if (reason & GPR_FLAG_MAJOR_MASK) {
9577 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9578 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9579 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9580 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9581#if RGENGC_ESTIMATE_OLDMALLOC
9582 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9586 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9587 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9588 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9589 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9590 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9594 objspace->profile.count++;
9595 objspace->profile.latest_gc_info = reason;
9596 objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
9597 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9598 objspace->profile.weak_references_count = 0;
9599 objspace->profile.retained_weak_references_count = 0;
9600 gc_prof_setup_new_record(objspace, reason);
9601 gc_reset_malloc_info(objspace, do_full_mark);
9604 GC_ASSERT(during_gc);
9606 gc_prof_timer_start(objspace);
9608 if (gc_marks(objspace, do_full_mark)) {
9612 gc_prof_timer_stop(objspace);
9614 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9621 int marking = is_incremental_marking(objspace);
9622 int sweeping = is_lazy_sweeping(objspace);
9624 if (marking || sweeping) {
9625 unsigned int lock_lev;
9626 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9628 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9630 if (is_incremental_marking(objspace)) {
9631 gc_marking_enter(objspace);
9632 gc_marks_rest(objspace);
9633 gc_marking_exit(objspace);
9638 if (is_lazy_sweeping(objspace)) {
9639 gc_sweeping_enter(objspace);
9640 gc_sweep_rest(objspace);
9641 gc_sweeping_exit(objspace);
9644 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9650 unsigned int reason;
9657 if (is_marking(objspace)) {
9659 if (is_full_marking(objspace)) buff[i++] = 'F';
9660 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9662 else if (is_sweeping(objspace)) {
9664 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9675 static char buff[0x10];
9676 gc_current_status_fill(objspace, buff);
9680#if PRINT_ENTER_EXIT_TICK
9682static tick_t last_exit_tick;
9683static tick_t enter_tick;
9684static int enter_count = 0;
9685static char last_gc_status[0x10];
9688gc_record(rb_objspace_t *objspace, int direction, const char *event)
9690 if (direction == 0) {
9692 enter_tick = tick();
9693 gc_current_status_fill(objspace, last_gc_status);
9696 tick_t exit_tick = tick();
9697 char current_gc_status[0x10];
9698 gc_current_status_fill(objspace, current_gc_status);
9701 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9702 enter_tick - last_exit_tick,
9703 exit_tick - enter_tick,
9705 last_gc_status, current_gc_status,
9706 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9707 last_exit_tick = exit_tick;
9710 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9712 exit_tick - enter_tick,
9714 last_gc_status, current_gc_status,
9715 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9721gc_record(rb_objspace_t *objspace, int direction, const char *event)
9728gc_enter_event_cstr(enum gc_enter_event event)
9731 case gc_enter_event_start: return "start";
9732 case gc_enter_event_continue: return "continue";
9733 case gc_enter_event_rest: return "rest";
9734 case gc_enter_event_finalizer: return "finalizer";
9735 case gc_enter_event_rb_memerror: return "rb_memerror";
9741gc_enter_count(
enum gc_enter_event event)
9744 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start);
break;
9745 case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue);
break;
9746 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest);
break;
9747 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer);
break;
9748 case gc_enter_event_rb_memerror:
break;
9753#define MEASURE_GC (objspace->flags.measure_gc)
9756static bool current_process_time(struct timespec *ts);
9761 if (!current_process_time(ts)) {
9772 if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
9773 current_process_time(&end_time) &&
9774 end_time.tv_sec >= ts->tv_sec) {
9775 return (uint64_t)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
9776 (end_time.tv_nsec - ts->tv_nsec);
9783gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9785 RB_VM_LOCK_ENTER_LEV(lock_lev);
9788 case gc_enter_event_rest:
9789 if (!is_marking(objspace)) break;
9791 case gc_enter_event_start:
9792 case gc_enter_event_continue:
9800 gc_enter_count(event);
9801 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9802 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9805 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9806 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9807 gc_record(objspace, 0, gc_enter_event_cstr(event));
9812gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9814 GC_ASSERT(during_gc != 0);
9817 gc_record(objspace, 1, gc_enter_event_cstr(event));
9818 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9819 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9822 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9828 GC_ASSERT(during_gc != 0);
9830 gc_clock_start(&objspace->profile.marking_start_time);
9836 GC_ASSERT(during_gc != 0);
9838 objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
9844 GC_ASSERT(during_gc != 0);
9846 gc_clock_start(&objspace->profile.sweeping_start_time);
9852 GC_ASSERT(during_gc != 0);
9854 objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
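/*
 * gc_marking_enter/exit and gc_sweeping_enter/exit accumulate time
 * measured by current_process_time() into profile.marking_time_ns and
 * profile.sweeping_time_ns; gc_stat_internal() below reports these
 * (converted to milliseconds) as the marking_time, sweeping_time and
 * combined time keys.
 */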
9858gc_with_gvl(void *ptr)
9861 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9865garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9867 if (dont_gc_val()) return TRUE;
9868 if (ruby_thread_has_gvl_p()) {
9869 return garbage_collect(objspace, reason);
9874 oar.objspace = objspace;
9875 oar.reason = reason;
9880 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9887gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
9891 for (; v != (VALUE)vend; v += stride) {
9902 if (!RVALUE_OLD_P(v) && !RVALUE_WB_UNPROTECTED(v)) {
9903 RVALUE_AGE_SET_CANDIDATE(objspace, v);
9915 unsigned int reason = (GPR_FLAG_FULL_MARK |
9916 GPR_FLAG_IMMEDIATE_MARK |
9917 GPR_FLAG_IMMEDIATE_SWEEP |
9921 if (RTEST(compact)) {
9922 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9924 reason |= GPR_FLAG_COMPACT;
9927 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9928 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9929 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9932 garbage_collect(objspace, reason);
9933 gc_finalize_deferred(objspace);
9939free_empty_pages(void)
9943 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9946 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
9947 rb_heap_t *tomb_heap = SIZE_POOL_TOMB_HEAP(size_pool);
9949 size_t freed_pages = 0;
9951 struct heap_page **next_page_ptr = &heap->free_pages;
9952 struct heap_page *page = heap->free_pages;
9956 GC_ASSERT(page->final_slots == 0);
9958 struct heap_page *next_page = page->free_next;
9960 if (page->free_slots == page->total_slots) {
9961 heap_unlink_page(objspace, heap, page);
9962 heap_add_page(objspace, size_pool, tomb_heap, page);
9966 *next_page_ptr = page;
9967 next_page_ptr = &page->free_next;
9973 *next_page_ptr = NULL;
9975 size_pool_allocatable_pages_set(objspace, size_pool, size_pool->allocatable_pages + freed_pages);
9978 heap_pages_free_unused_pages(objspace);
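/*
 * free_empty_pages(): walk each eden heap's free-page list, unlink pages
 * whose every slot is free and park them in the tomb heap, then let
 * heap_pages_free_unused_pages() return them to the system.  The freed
 * page count is added back to the pool's allocatable_pages budget.
 */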
9982rb_gc_prepare_heap(void)
9984 rb_objspace_each_objects(gc_set_candidate_object_i, NULL);
9988#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
10005 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
10033 GC_ASSERT(st_is_member(finalizer_table, obj));
10037 GC_ASSERT(RVALUE_MARKED(obj));
10038 GC_ASSERT(!RVALUE_PINNED(obj));
10043 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
10054 int wb_unprotected;
10060 gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)scan, (void *)free);
10063 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
10065 GC_ASSERT(!RVALUE_MARKING((VALUE)src));
10068 marked = rb_objspace_marked_object_p((VALUE)src);
10069 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
10070 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
10071 bool remembered = RVALUE_REMEMBERED((VALUE)src);
10072 age = RVALUE_AGE_GET((VALUE)src);
10075 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
10076 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
10077 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
10078 CLEAR_IN_BITMAP(GET_HEAP_PAGE((VALUE)src)->remembered_bits, (VALUE)src);
10082 DURING_GC_COULD_MALLOC_REGION_START();
10084 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
10086 DURING_GC_COULD_MALLOC_REGION_END();
10089 st_data_t srcid = (st_data_t)src, id;
10093 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
10094 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
10096 DURING_GC_COULD_MALLOC_REGION_START();
10098 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
10099 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
10101 DURING_GC_COULD_MALLOC_REGION_END();
10105 memcpy(dest, src, MIN(src_slot_size, slot_size));
10107 if (RVALUE_OVERHEAD > 0) {
10108 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
10109 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
10111 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
10114 memset(src, 0, src_slot_size);
10115 RVALUE_AGE_RESET((VALUE)src);
10119 MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
10122 CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
10126 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10129 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10132 if (wb_unprotected) {
10133 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10136 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10139 if (uncollectible) {
10140 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10143 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10146 RVALUE_AGE_SET((VALUE)dest, age);
10148 src->as.moved.flags = T_MOVED;
10149 src->as.moved.dummy = Qundef;
10150 src->as.moved.destination = (VALUE)dest;
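/*
 * gc_move(): the object's mark / WB-unprotected / uncollectible /
 * remembered bits and its age are captured and cleared on the source
 * slot, the payload is memcpy'd into the destination slot, the captured
 * bits are re-applied there, and the source slot is turned into a
 * T_MOVED forwarding cell whose destination field lets later passes
 * rewrite stale references.
 */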
10156#if GC_CAN_COMPILE_COMPACTION
10158compare_pinned_slots(const void *left, const void *right, void *dummy)
10163 left_page = *(struct heap_page * const *)left;
10164 right_page = *(struct heap_page * const *)right;
10166 return left_page->pinned_slots - right_page->pinned_slots;
10170compare_free_slots(const void *left, const void *right, void *dummy)
10175 left_page = *(struct heap_page * const *)left;
10176 right_page = *(struct heap_page * const *)right;
10178 return left_page->free_slots - right_page->free_slots;
10182gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
10184 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10187 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10189 struct heap_page *page = 0, **page_list = malloc(size);
10192 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10193 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10194 page_list[i++] = page;
10198 GC_ASSERT((size_t)i == total_pages);
10205 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10207 for (i = 0; i < total_pages; i++) {
10208 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10209 if (page_list[i]->free_slots != 0) {
10210 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
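/*
 * gc_sort_heap_by_compare_func(): snapshot each eden heap's page list
 * into a malloc'd array, presumably sort it with the supplied comparator
 * (compare_free_slots or compare_pinned_slots above; the sort call itself
 * is elided in this listing), then rebuild the page list and free-page
 * list in that order so compaction fills pages in the chosen direction.
 */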
10222 if (ARY_SHARED_P(v)) {
10223 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10225 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10227 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10229 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10230 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10231 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10232 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10240 for (long i = 0; i < len; i++) {
10241 UPDATE_IF_MOVED(objspace, ptr[i]);
10245 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10246 if (rb_ary_embeddable_p(v)) {
10247 rb_ary_make_embedded(v);
10260 if (rb_shape_obj_too_complex(v)) {
10261 gc_ref_update_table_values_only(objspace, ROBJECT_IV_HASH(v));
10265 size_t slot_size = rb_gc_obj_slot_size(v);
10267 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10269 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10275 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10276 UPDATE_IF_MOVED(objspace, ptr[i]);
10281hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10285 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10286 *key = rb_gc_location((VALUE)*key);
10289 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10290 *value = rb_gc_location((VALUE)*value);
10293 return ST_CONTINUE;
10297hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10303 if (gc_object_moved_p(objspace, (VALUE)key)) {
10307 if (gc_object_moved_p(objspace, (VALUE)value)) {
10310 return ST_CONTINUE;
10314hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10318 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10319 *value = rb_gc_location((VALUE)*value);
10322 return ST_CONTINUE;
10326hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10332 if (gc_object_moved_p(objspace, (VALUE)value)) {
10335 return ST_CONTINUE;
10341 if (!tbl || tbl->num_entries == 0) return;
10343 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10349rb_gc_ref_update_table_values_only(st_table *tbl)
10351 gc_ref_update_table_values_only(&rb_objspace, tbl);
10357 if (!tbl || tbl->num_entries == 0) return;
10359 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10369 gc_update_table_refs(objspace, ptr);
10375 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10383 UPDATE_IF_MOVED(objspace, me->owner);
10384 UPDATE_IF_MOVED(objspace, me->defined_class);
10387 switch (def->type) {
10388 case VM_METHOD_TYPE_ISEQ:
10389 if (def->body.iseq.iseqptr) {
10392 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10394 case VM_METHOD_TYPE_ATTRSET:
10395 case VM_METHOD_TYPE_IVAR:
10396 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10398 case VM_METHOD_TYPE_BMETHOD:
10399 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10401 case VM_METHOD_TYPE_ALIAS:
10404 case VM_METHOD_TYPE_REFINED:
10407 case VM_METHOD_TYPE_CFUNC:
10408 case VM_METHOD_TYPE_ZSUPER:
10409 case VM_METHOD_TYPE_MISSING:
10410 case VM_METHOD_TYPE_OPTIMIZED:
10411 case VM_METHOD_TYPE_UNDEF:
10412 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10423 for (i=0; i<n; i++) {
10424 UPDATE_IF_MOVED(objspace, values[i]);
10429rb_gc_update_values(long n, VALUE *values)
10438 is_pointer_to_heap(objspace, (void *)obj) &&
10445 switch (imemo_type(obj)) {
10449 if (LIKELY(env->ep)) {
10451 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10452 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10453 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10458 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10459 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10460 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10463 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10464 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10465 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10466 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10468 case imemo_throw_data:
10469 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10474 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10475 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10478 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10481 rb_iseq_mark_and_move((rb_iseq_t *)obj, true);
10484 rb_ast_update_references((rb_ast_t *)obj);
10486 case imemo_callcache:
10494 if (moved_or_living_object_strictly_p(objspace, cc->klass) &&
10495 moved_or_living_object_strictly_p(objspace, (VALUE)cc->cme_)) {
10496 UPDATE_IF_MOVED(objspace, cc->klass);
10500 vm_cc_invalidate(cc);
10505 case imemo_constcache:
10508 UPDATE_IF_MOVED(objspace, ice->value);
10511 case imemo_parser_strterm:
10513 case imemo_callinfo:
10516 rb_bug("not reachable %d", imemo_type(obj));
10521static enum rb_id_table_iterator_result
10522check_id_table_move(VALUE value, void *data)
10526 if (gc_object_moved_p(objspace, (VALUE)value)) {
10527 return ID_TABLE_REPLACE;
10530 return ID_TABLE_CONTINUE;
10542 void *poisoned = asan_unpoison_object_temporary(value);
10545 destination = (VALUE)RMOVED(value)->destination;
10549 destination = value;
10555 asan_poison_object(value);
10559 destination = value;
10562 return destination;
10565static enum rb_id_table_iterator_result
10566update_id_table(VALUE *value, void *data, int existing)
10570 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10571 *value = rb_gc_location((VALUE)*value);
10574 return ID_TABLE_CONTINUE;
10581 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10585static enum rb_id_table_iterator_result
10586update_cc_tbl_i(VALUE ccs_ptr, void *data)
10590 VM_ASSERT(vm_ccs_p(ccs));
10592 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10596 for (int i=0; i<ccs->len; i++) {
10597 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10598 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10600 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10601 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10606 return ID_TABLE_CONTINUE;
10614 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10618static enum rb_id_table_iterator_result
10619update_cvc_tbl_i(VALUE cvc_entry, void *data)
10627 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
10630 entry->class_value = rb_gc_location(entry->class_value);
10632 return ID_TABLE_CONTINUE;
10640 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10644static enum rb_id_table_iterator_result
10645mark_cvc_tbl_i(VALUE cvc_entry, void *data)
10653 gc_mark(objspace, (VALUE) entry->cref);
10655 return ID_TABLE_CONTINUE;
10663 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
10667static enum rb_id_table_iterator_result
10668update_const_table(VALUE value, void *data)
10673 if (gc_object_moved_p(objspace, ce->value)) {
10674 ce->value = rb_gc_location(ce->value);
10677 if (gc_object_moved_p(objspace, ce->file)) {
10678 ce->file = rb_gc_location(ce->file);
10681 return ID_TABLE_CONTINUE;
10688 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10695 UPDATE_IF_MOVED(objspace, entry->klass);
10696 entry = entry->next;
10703 UPDATE_IF_MOVED(objspace, ext->origin_);
10704 UPDATE_IF_MOVED(objspace, ext->includer);
10705 UPDATE_IF_MOVED(objspace, ext->refined_class);
10706 update_subclass_entries(objspace, ext->subclasses);
10712 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10713 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10714 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10722 RVALUE *any = RANY(obj);
10724 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10727 rb_ref_update_generic_ivar(obj);
10733 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
10738 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10740 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10741 update_cc_tbl(objspace, obj);
10742 update_cvc_tbl(objspace, obj);
10743 update_superclasses(objspace, obj);
10745 if (rb_shape_obj_too_complex(obj)) {
10746 gc_ref_update_table_values_only(objspace, RCLASS_IV_HASH(obj));
10749 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10750 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10754 update_class_ext(objspace, RCLASS_EXT(obj));
10755 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10757 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
10761 if (RICLASS_OWNS_M_TBL_P(obj)) {
10762 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10765 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10767 update_class_ext(objspace, RCLASS_EXT(obj));
10768 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10769 update_cc_tbl(objspace, obj);
10773 gc_ref_update_imemo(objspace, obj);
10785 gc_ref_update_array(objspace, obj);
10789 gc_ref_update_hash(objspace, obj);
10790 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10795 if (STR_SHARED_P(obj)) {
10801 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10802 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10803 rb_str_make_embedded(obj);
10814 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
10815 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
10817 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
10818 VALUE *ref = (VALUE *)((char *)ptr + offset);
10820 *ref = rb_gc_location(*ref);
10825 if (compact_func) (*compact_func)(ptr);
10832 gc_ref_update_object(objspace, obj);
10836 if (any->as.file.fptr) {
10837 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10838 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10847 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10852 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10861 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10863 if (any->as.match.str) {
10864 UPDATE_IF_MOVED(objspace, any->as.match.str);
10869 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10870 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10874 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10875 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10881 long i, len = RSTRUCT_LEN(obj);
10882 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10884 for (i = 0; i < len; i++) {
10885 UPDATE_IF_MOVED(objspace, ptr[i]);
10891 rb_gcdebug_print_obj_condition((VALUE)obj);
10892 rb_obj_info_dump(obj);
10893 rb_bug("unreachable");
10899 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10901 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10908 asan_unlock_freelist(page);
10909 asan_lock_freelist(page);
10910 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
10911 page->flags.has_remembered_objects = FALSE;
10914 for (; v != (VALUE)vend; v += stride) {
10915 void *poisoned = asan_unpoison_object_temporary(v);
10923 if (RVALUE_WB_UNPROTECTED(v)) {
10924 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
10926 if (RVALUE_REMEMBERED(v)) {
10927 page->flags.has_remembered_objects = TRUE;
10929 if (page->flags.before_sweep) {
10930 if (RVALUE_MARKED(v)) {
10931 gc_update_object_references(objspace, v);
10935 gc_update_object_references(objspace, v);
10940 asan_poison_object(v);
10948#define global_symbols ruby_global_symbols
10953 objspace->flags.during_reference_updating = true;
10956 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10960 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10961 bool should_set_mark_bits = TRUE;
10963 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10965 ccan_list_for_each(&heap->pages, page, page_node) {
10966 uintptr_t start = (uintptr_t)page->start;
10967 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10969 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10970 if (page == heap->sweeping_page) {
10971 should_set_mark_bits = FALSE;
10973 if (should_set_mark_bits) {
10974 gc_setup_mark_bits(page);
10978 rb_vm_update_references(vm);
10979 rb_gc_update_global_tbl();
10980 global_symbols.ids = rb_gc_location(global_symbols.ids);
10981 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10982 gc_ref_update_table_values_only(objspace, objspace->obj_to_id_tbl);
10983 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10984 gc_update_table_refs(objspace, global_symbols.str_sym);
10985 gc_update_table_refs(objspace, finalizer_table);
10987 objspace->flags.during_reference_updating = false;
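/*
 * gc_update_references(): with during_reference_updating set, every slot
 * of every eden page is visited by gc_ref_update(), and then the VM
 * roots, the global table, the symbol tables, the obj_to_id/id_to_obj
 * maps and the finalizer table get the same treatment, so no reference
 * to a T_MOVED source slot survives the compaction cycle.
 */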
10990#if GC_CAN_COMPILE_COMPACTION
11004gc_compact_stats(VALUE self)
11008 VALUE h = rb_hash_new();
11009 VALUE considered = rb_hash_new();
11010 VALUE moved = rb_hash_new();
11011 VALUE moved_up = rb_hash_new();
11012 VALUE moved_down = rb_hash_new();
11014 for (i=0; i<T_MASK; i++) {
11015 if (objspace->rcompactor.considered_count_table[i]) {
11016 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
11019 if (objspace->rcompactor.moved_count_table[i]) {
11020 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
11023 if (objspace->rcompactor.moved_up_count_table[i]) {
11024 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
11027 if (objspace->rcompactor.moved_down_count_table[i]) {
11028 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
11032 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
11033 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
11034 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
11035 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
11040# define gc_compact_stats rb_f_notimplement
11043#if GC_CAN_COMPILE_COMPACTION
11045root_obj_check_moved_i(const char *category, VALUE obj, void *data)
11048 rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, obj_info(rb_gc_location(obj)));
11053reachable_object_check_moved_i(VALUE ref, void *data)
11057 rb_bug("Object %s points to MOVED: %p -> %s", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
11062heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
11065 for (; v != (VALUE)vend; v += stride) {
11070 void *poisoned = asan_unpoison_object_temporary(v);
11077 if (!rb_objspace_garbage_object_p(v)) {
11078 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
11084 asan_poison_object(v);
11110gc_compact(VALUE self)
11115 return gc_compact_stats(self);
11118# define gc_compact rb_f_notimplement
11121#if GC_CAN_COMPILE_COMPACTION
11123struct desired_compaction_pages_i_data {
11125 size_t required_slots[SIZE_POOL_COUNT];
11129desired_compaction_pages_i(struct heap_page *page, void *data)
11131 struct desired_compaction_pages_i_data *tdata = data;
11134 VALUE vend = vstart + (VALUE)(page->total_slots * page->size_pool->slot_size);
11137 for (VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
11139 void *poisoned = asan_unpoison_object_temporary(v);
11142 asan_poison_object(v);
11147 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
11148 size_t dest_pool_idx = dest_pool - size_pools;
11149 tdata->required_slots[dest_pool_idx]++;
11163 if (RTEST(double_heap)) {
11164 rb_warn("double_heap is deprecated, please use expand_heap instead");
11167 RB_VM_LOCK_ENTER();
11172 if (RTEST(expand_heap)) {
11173 struct desired_compaction_pages_i_data desired_compaction = {
11174 .objspace = objspace,
11175 .required_slots = {0},
11178 objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
11181 size_t max_existing_pages = 0;
11182 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11184 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11185 max_existing_pages = MAX(max_existing_pages, heap->total_pages);
11188 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11190 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11192 size_t pages_to_add = 0;
11199 pages_to_add += max_existing_pages - heap->total_pages;
11204 pages_to_add += slots_to_pages_for_size_pool(objspace, size_pool, desired_compaction.required_slots[i]);
11211 heap_add_pages(objspace, size_pool, heap, pages_to_add);
11214 else if (RTEST(double_heap)) {
11215 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11217 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11218 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
11223 if (RTEST(toward_empty)) {
11224 objspace->rcompactor.compare_func = compare_free_slots;
11227 RB_VM_LOCK_LEAVE();
11231 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
11232 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
11234 objspace->rcompactor.compare_func = NULL;
11235 return gc_compact_stats(self);
11238# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
11251 unless_objspace(objspace) { return; }
11252 unsigned int reason = GPR_DEFAULT_REASON;
11253 garbage_collect(objspace, reason);
11259 unless_objspace(objspace) { return FALSE; }
11263#if RGENGC_PROFILE >= 2
11265static const char *type_name(int type, VALUE obj);
11268gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11272 for (i=0; i<T_MASK; i++) {
11273 const char *type = type_name(i, 0);
11276 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
11293gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11295 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11296 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11297#if RGENGC_ESTIMATE_OLDMALLOC
11298 static VALUE sym_oldmalloc;
11300 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11301 static VALUE sym_none, sym_marking, sym_sweeping;
11302 static VALUE sym_weak_references_count, sym_retained_weak_references_count;
11304 VALUE major_by, need_major_by;
11305 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11310 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
11311 hash = hash_or_key;
11317 if (NIL_P(sym_major_by)) {
11318#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11321 S(immediate_sweep);
11331#if RGENGC_ESTIMATE_OLDMALLOC
11343 S(weak_references_count);
11344 S(retained_weak_references_count);
11348#define SET(name, attr) \
11349 if (key == sym_##name) \
11351 else if (hash != Qnil) \
11352 rb_hash_aset(hash, sym_##name, (attr));
11355 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11356 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11357 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11358 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11359#if RGENGC_ESTIMATE_OLDMALLOC
11360 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11363 SET(major_by, major_by);
11365 if (orig_flags == 0) {
11366 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11368 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11369 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11370 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11371 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11372#if RGENGC_ESTIMATE_OLDMALLOC
11373 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11376 SET(need_major_by, need_major_by);
11380 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11381 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11382 (flags & GPR_FLAG_METHOD) ? sym_method :
11383 (flags & GPR_FLAG_CAPI) ? sym_capi :
11384 (flags & GPR_FLAG_STRESS) ? sym_stress :
11388 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11389 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11391 if (orig_flags == 0) {
11392 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11393 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11396 SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
11397 SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));
11401 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11411 return gc_info_decode(objspace, key, 0);
11420 arg = rb_hash_new();
11426 return gc_info_decode(objspace, arg, 0);
11432 gc_stat_sym_marking_time,
11433 gc_stat_sym_sweeping_time,
11434 gc_stat_sym_heap_allocated_pages,
11435 gc_stat_sym_heap_sorted_length,
11436 gc_stat_sym_heap_allocatable_pages,
11437 gc_stat_sym_heap_available_slots,
11438 gc_stat_sym_heap_live_slots,
11439 gc_stat_sym_heap_free_slots,
11440 gc_stat_sym_heap_final_slots,
11441 gc_stat_sym_heap_marked_slots,
11442 gc_stat_sym_heap_eden_pages,
11443 gc_stat_sym_heap_tomb_pages,
11444 gc_stat_sym_total_allocated_pages,
11445 gc_stat_sym_total_freed_pages,
11446 gc_stat_sym_total_allocated_objects,
11447 gc_stat_sym_total_freed_objects,
11448 gc_stat_sym_malloc_increase_bytes,
11449 gc_stat_sym_malloc_increase_bytes_limit,
11450 gc_stat_sym_minor_gc_count,
11451 gc_stat_sym_major_gc_count,
11452 gc_stat_sym_compact_count,
11453 gc_stat_sym_read_barrier_faults,
11454 gc_stat_sym_total_moved_objects,
11455 gc_stat_sym_remembered_wb_unprotected_objects,
11456 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11457 gc_stat_sym_old_objects,
11458 gc_stat_sym_old_objects_limit,
11459#if RGENGC_ESTIMATE_OLDMALLOC
11460 gc_stat_sym_oldmalloc_increase_bytes,
11461 gc_stat_sym_oldmalloc_increase_bytes_limit,
11463 gc_stat_sym_weak_references_count,
11465 gc_stat_sym_total_generated_normal_object_count,
11466 gc_stat_sym_total_generated_shady_object_count,
11467 gc_stat_sym_total_shade_operation_count,
11468 gc_stat_sym_total_promoted_count,
11469 gc_stat_sym_total_remembered_normal_object_count,
11470 gc_stat_sym_total_remembered_shady_object_count,
11475static VALUE gc_stat_symbols[gc_stat_sym_last];
11478setup_gc_stat_symbols(void)
11480 if (gc_stat_symbols[0] == 0) {
11481#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11486 S(heap_allocated_pages);
11487 S(heap_sorted_length);
11488 S(heap_allocatable_pages);
11489 S(heap_available_slots);
11490 S(heap_live_slots);
11491 S(heap_free_slots);
11492 S(heap_final_slots);
11493 S(heap_marked_slots);
11494 S(heap_eden_pages);
11495 S(heap_tomb_pages);
11496 S(total_allocated_pages);
11497 S(total_freed_pages);
11498 S(total_allocated_objects);
11499 S(total_freed_objects);
11500 S(malloc_increase_bytes);
11501 S(malloc_increase_bytes_limit);
11505 S(read_barrier_faults);
11506 S(total_moved_objects);
11507 S(remembered_wb_unprotected_objects);
11508 S(remembered_wb_unprotected_objects_limit);
11510 S(old_objects_limit);
11511#if RGENGC_ESTIMATE_OLDMALLOC
11512 S(oldmalloc_increase_bytes);
11513 S(oldmalloc_increase_bytes_limit);
11515 S(weak_references_count);
11517 S(total_generated_normal_object_count);
11518 S(total_generated_shady_object_count);
11519 S(total_shade_operation_count);
11520 S(total_promoted_count);
11521 S(total_remembered_normal_object_count);
11522 S(total_remembered_shady_object_count);
11529ns_to_ms(uint64_t ns)
11531 return ns / (1000 * 1000);
11535gc_stat_internal(VALUE hash_or_sym)
11540 setup_gc_stat_symbols();
11542 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11543 hash = hash_or_sym;
11552#define SET(name, attr) \
11553 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11555 else if (hash != Qnil) \
11556 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
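/*
 * Sketch of how the SET() macro above expands -- the branch taken for a
 * single-symbol query is elided in this listing and is assumed to return
 * the value -- e.g. for SET(count, objspace->profile.count):
 *
 *   if (key == gc_stat_symbols[gc_stat_sym_count])
 *       return objspace->profile.count;                     // assumed
 *   else if (hash != Qnil)
 *       rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_count],
 *                    SIZET2NUM(objspace->profile.count));
 */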
11558 SET(count, objspace->profile.count);
11559 SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns));
11560 SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
11561 SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
11564 SET(heap_allocated_pages, heap_allocated_pages);
11565 SET(heap_sorted_length, heap_pages_sorted_length);
11566 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11567 SET(heap_available_slots, objspace_available_slots(objspace));
11568 SET(heap_live_slots, objspace_live_slots(objspace));
11569 SET(heap_free_slots, objspace_free_slots(objspace));
11570 SET(heap_final_slots, heap_pages_final_slots);
11571 SET(heap_marked_slots, objspace->marked_slots);
11572 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11573 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11574 SET(total_allocated_pages, total_allocated_pages(objspace));
11575 SET(total_freed_pages, total_freed_pages(objspace));
11576 SET(total_allocated_objects, total_allocated_objects(objspace));
11577 SET(total_freed_objects, total_freed_objects(objspace));
11578 SET(malloc_increase_bytes, malloc_increase);
11579 SET(malloc_increase_bytes_limit, malloc_limit);
11580 SET(minor_gc_count, objspace->profile.minor_gc_count);
11581 SET(major_gc_count, objspace->profile.major_gc_count);
11582 SET(compact_count, objspace->profile.compact_count);
11583 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11584 SET(total_moved_objects, objspace->rcompactor.total_moved);
11585 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11586 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11587 SET(old_objects, objspace->rgengc.old_objects);
11588 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11589#if RGENGC_ESTIMATE_OLDMALLOC
11590 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11591 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11595 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11596 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11597 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11598 SET(total_promoted_count, objspace->profile.total_promoted_count);
11599 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11600 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11605 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11608#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11609 if (hash != Qnil) {
11610 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11611 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11612 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11613 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11614 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11615 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11626 arg = rb_hash_new();
11629 size_t value = gc_stat_internal(arg);
11632 else if (RB_TYPE_P(arg, T_HASH)) {
11639 gc_stat_internal(arg);
11647 size_t value = gc_stat_internal(key);
11651 gc_stat_internal(key);
11657enum gc_stat_heap_sym {
11658 gc_stat_heap_sym_slot_size,
11659 gc_stat_heap_sym_heap_allocatable_pages,
11660 gc_stat_heap_sym_heap_eden_pages,
11661 gc_stat_heap_sym_heap_eden_slots,
11662 gc_stat_heap_sym_heap_tomb_pages,
11663 gc_stat_heap_sym_heap_tomb_slots,
11664 gc_stat_heap_sym_total_allocated_pages,
11665 gc_stat_heap_sym_total_freed_pages,
11666 gc_stat_heap_sym_force_major_gc_count,
11667 gc_stat_heap_sym_force_incremental_marking_finish_count,
11668 gc_stat_heap_sym_total_allocated_objects,
11669 gc_stat_heap_sym_total_freed_objects,
11670 gc_stat_heap_sym_last
11673static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11676setup_gc_stat_heap_symbols(void)
11678 if (gc_stat_heap_symbols[0] == 0) {
11679#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11681 S(heap_allocatable_pages);
11682 S(heap_eden_pages);
11683 S(heap_eden_slots);
11684 S(heap_tomb_pages);
11685 S(heap_tomb_slots);
11686 S(total_allocated_pages);
11687 S(total_freed_pages);
11688 S(force_major_gc_count);
11689 S(force_incremental_marking_finish_count);
11690 S(total_allocated_objects);
11691 S(total_freed_objects);
11697gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11702 setup_gc_stat_heap_symbols();
11704 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11705 hash = hash_or_sym;
11714 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11715 rb_raise(rb_eArgError, "size pool index out of range");
11720#define SET(name, attr) \
11721 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11723 else if (hash != Qnil) \
11724 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11726 SET(slot_size, size_pool->slot_size);
11727 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11728 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11729 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11730 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11731 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11732 SET(total_allocated_pages, size_pool->total_allocated_pages);
11733 SET(total_freed_pages, size_pool->total_freed_pages);
11734 SET(force_major_gc_count, size_pool->force_major_gc_count);
11735 SET(force_incremental_marking_finish_count, size_pool->force_incremental_marking_finish_count);
11736 SET(total_allocated_objects, size_pool->total_allocated_objects);
11737 SET(total_freed_objects, size_pool->total_freed_objects);
11741 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11750 if (NIL_P(heap_name)) {
11752 arg = rb_hash_new();
11754 else if (RB_TYPE_P(arg, T_HASH)) {
11761 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11764 hash = rb_hash_new();
11765 rb_hash_aset(arg, INT2FIX(i), hash);
11767 gc_stat_heap_internal(i, hash);
11771 int size_pool_idx = FIX2INT(heap_name);
11774 arg = rb_hash_new();
11777 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11780 else if (RB_TYPE_P(arg, T_HASH)) {
11787 gc_stat_heap_internal(size_pool_idx, arg);
11790 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
11800 return ruby_gc_stress_mode;
11806 objspace->flags.gc_stressful = RTEST(flag);
11807 objspace->gc_stress_mode = flag;
11814 gc_stress_set(objspace, flag);
11822 return rb_objspace_gc_enable(objspace);
11828 int old = dont_gc_val();
11837 return rb_gc_enable();
11841rb_gc_disable_no_rest(void)
11844 return gc_disable_no_rest(objspace);
11850 int old = dont_gc_val();
11859 return rb_objspace_gc_disable(objspace);
11866 return gc_disable_no_rest(objspace);
11872 return rb_gc_disable();
11875#if GC_CAN_COMPILE_COMPACTION
11889 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11891 ruby_enable_autocompact = RTEST(v);
11893#if RGENGC_CHECK_MODE
11894 ruby_autocompact_compare_func = NULL;
11898 if (id == rb_intern("empty")) {
11899 ruby_autocompact_compare_func = compare_free_slots;
11907# define gc_set_auto_compact rb_f_notimplement
11910#if GC_CAN_COMPILE_COMPACTION
11918gc_get_auto_compact(VALUE _)
11920 return RBOOL(ruby_enable_autocompact);
11923# define gc_get_auto_compact rb_f_notimplement
11927get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11929 const char *ptr = getenv(name);
11932 if (ptr != NULL && *ptr) {
11935#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11936 val = strtoll(ptr, &end, 0);
11938 val = strtol(ptr, &end, 0);
11941 case 'k': case 'K':
11945 case 'm': case 'M':
11949 case 'g': case 'G':
11950 unit = 1024*1024*1024;
11954 while (*end && isspace((unsigned char)*end)) end++;
11956 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11960 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11961 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11966 if (val > 0 && (size_t)val > lower_bound) {
11968 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11970 *default_value = (size_t)val;
11975 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11976 name, val, *default_value, lower_bound);
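/*
 * get_envparam_size() accepts plain integers or values with a k/K, m/M or
 * g/G suffix (the multiplication by the unit is elided in this listing
 * and assumed) and rejects settings that overflow or that do not exceed
 * lower_bound.  For example, a hypothetical setting
 *
 *   RUBY_GC_MALLOC_LIMIT=16m
 *
 * would yield gc_params.malloc_limit_min = 16 * 1024 * 1024, provided
 * that value is greater than the lower bound passed in.
 */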
11985get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11987 const char *ptr = getenv(name);
11990 if (ptr != NULL && *ptr) {
11992 val = strtod(ptr, &end);
11993 if (!*ptr || *end) {
11994 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11998 if (accept_zero && val == 0.0) {
12001 else if (val <= lower_bound) {
12003 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
12004 name, val, *default_value, lower_bound);
12007 else if (upper_bound != 0.0 &&
12008 val > upper_bound) {
12010 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
12011 name, val, *default_value, upper_bound);
12021 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
12022 *default_value = val;
12031 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
12033 char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
12034 snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);
12036 size_t size_pool_init_slots = gc_params.size_pool_init_slots[i];
12037 if (get_envparam_size(env_key, &size_pool_init_slots, 0)) {
12038 gc_params.size_pool_init_slots[i] = size_pool_init_slots;
12041 if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
12042 size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
12043 size_pool->allocatable_pages = slots_to_pages_for_size_pool(objspace, size_pool, slots);
12048 size_pool->allocatable_pages = 0;
12051 heap_pages_expand_sorted(objspace);
12095ruby_gc_set_params(void)
12099 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
12103 gc_set_initial_pages(objspace);
12105 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
12106 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
12107 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
12109 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
12110 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
12111 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
12112 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
12113 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
12114 get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
12116 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
12117 malloc_limit = gc_params.malloc_limit_min;
12119 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
12120 if (!gc_params.malloc_limit_max) {
12121 gc_params.malloc_limit_max = SIZE_MAX;
12123 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
12125#if RGENGC_ESTIMATE_OLDMALLOC
12126 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
12127 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
12129 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
12130 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
12135reachable_objects_from_callback(VALUE obj)
12138 cr->mfd->mark_func(obj, cr->mfd->data);
12142rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
12146 RB_VM_LOCK_ENTER();
12148 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
12150 if (is_markable_object(obj)) {
12152 struct gc_mark_func_data_struct mfd = {
12155 }, *prev_mfd = cr->mfd;
12158 gc_mark_children(objspace, obj);
12159 cr->mfd = prev_mfd;
12162 RB_VM_LOCK_LEAVE();
12166 const char *category;
12167 void (*func)(const char *category, VALUE, void *);
12172root_objects_from(VALUE obj, void *ptr)
12175 (*data->func)(data->category, obj, data->data);
12179rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
12182 objspace_reachable_objects_from_root(objspace, func, passing_data);
12186objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
12188 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
12193 .data = passing_data,
12195 struct gc_mark_func_data_struct mfd = {
12196 .mark_func = root_objects_from,
12198 }, *prev_mfd = cr->mfd;
12201 gc_mark_roots(objspace, &data.category);
12202 cr->mfd = prev_mfd;
12216gc_vraise(void *ptr)
12219 rb_vraise(argv->exc, argv->fmt, *argv->ap);
12224gc_raise(VALUE exc, const char *fmt, ...)
12232 if (ruby_thread_has_gvl_p()) {
12242 fprintf(stderr, "%s", "[FATAL] ");
12243 vfprintf(stderr, fmt, ap);
12250static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
12253negative_size_allocation_error(const char *msg)
12259ruby_memerror_body(void *dummy)
12265NORETURN(static void ruby_memerror(void));
12270 if (ruby_thread_has_gvl_p()) {
12279 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12282 exit(EXIT_FAILURE);
12289 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
12300 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12305 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12306 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12307 exit(EXIT_FAILURE);
12309 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12310 rb_ec_raised_clear(ec);
12313 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12314 exc = ruby_vm_special_exception_copy(exc);
12317 EC_JUMP_TAG(ec, TAG_RAISE);
12321rb_aligned_malloc(size_t alignment, size_t size)
12324 GC_ASSERT(((alignment - 1) & alignment) == 0);
12325 GC_ASSERT(alignment % sizeof(void*) == 0);
12329#if defined __MINGW32__
12330 res = __mingw_aligned_malloc(size, alignment);
12331#elif defined _WIN32
12332 void *_aligned_malloc(size_t, size_t);
12333 res = _aligned_malloc(size, alignment);
12334#elif defined(HAVE_POSIX_MEMALIGN)
12335 if (posix_memalign(&res, alignment, size) != 0) {
12338#elif defined(HAVE_MEMALIGN)
12339 res = memalign(alignment, size);
12342 res = malloc(alignment + size + sizeof(void*));
12343 aligned = (char*)res + alignment + sizeof(void*);
12344 aligned -= ((VALUE)aligned & (alignment - 1));
12345 ((void**)aligned)[-1] = res;
12346 res = (void*)aligned;
12349 GC_ASSERT((uintptr_t)res % alignment == 0);
12355rb_aligned_free(void *ptr, size_t size)
12357#if defined __MINGW32__
12358 __mingw_aligned_free(ptr);
12359#elif defined _WIN32
12360 _aligned_free(ptr);
12361#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12364 free(((void**)ptr)[-1]);
12368static inline size_t
12369objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12371#ifdef HAVE_MALLOC_USABLE_SIZE
12372 return malloc_usable_size(ptr);
12379 MEMOP_TYPE_MALLOC = 0,
12385atomic_sub_nounderflow(size_t *var, size_t sub)
12387 if (sub == 0) return;
12391 if (val < sub) sub = val;
12392 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
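/*
 * atomic_sub_nounderflow(): a compare-and-swap retry loop that subtracts
 * at most the current value, so counters such as malloc_increase can be
 * decremented concurrently without wrapping below zero.
 */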
12400 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12401 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12403 if (gc_stress_full_mark_after_malloc_p()) {
12404 reason |= GPR_FLAG_FULL_MARK;
12406 garbage_collect_with_gvl(objspace, reason);
12411objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12413 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12415 type == MEMOP_TYPE_MALLOC ? "malloc" :
12416 type == MEMOP_TYPE_FREE ? "free " :
12417 type == MEMOP_TYPE_REALLOC ? "realloc" : "error",
12418 new_size, old_size);
12423objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12425 if (new_size > old_size) {
12426 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12427#if RGENGC_ESTIMATE_OLDMALLOC
12428 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12432 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12433#if RGENGC_ESTIMATE_OLDMALLOC
12434 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12438 if (type == MEMOP_TYPE_MALLOC) {
12441 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12445 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12449#if MALLOC_ALLOCATED_SIZE
12450 if (new_size >= old_size) {
12451 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12454 size_t dec_size = old_size - new_size;
12455 size_t allocated_size = objspace->malloc_params.allocated_size;
12457#if MALLOC_ALLOCATED_SIZE_CHECK
12458 if (allocated_size < dec_size) {
12459 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12462 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12466 case MEMOP_TYPE_MALLOC:
12467 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12469 case MEMOP_TYPE_FREE:
12471 size_t allocations = objspace->malloc_params.allocations;
12472 if (allocations > 0) {
12473 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12475#if MALLOC_ALLOCATED_SIZE_CHECK
12477 GC_ASSERT(objspace->malloc_params.allocations > 0);
12482 case MEMOP_TYPE_REALLOC: break;
12488#define objspace_malloc_increase(...) \
12489 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12490 !malloc_increase_done; \
12491 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
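/*
 * objspace_malloc_increase() is deliberately a degenerate for(;;) header:
 * the "report" helper runs as the loop initializer and the "body" helper
 * as its update expression (presumably returning false and true
 * respectively), so any block written directly after the macro invocation
 * -- see the MEMOP_TYPE_FREE use in objspace_xfree() below -- executes
 * exactly once in between the two.
 */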
12495#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12502#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12503const char *ruby_malloc_info_file;
12504int ruby_malloc_info_line;
12507static inline size_t
12508objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12510 if (size == 0) size = 1;
12512#if CALC_EXACT_MALLOC_SIZE
12526 return during_gc && !dont_gc_val() && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12529static inline void *
12530objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12532 size = objspace_malloc_size(objspace, mem, size);
12533 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12535#if CALC_EXACT_MALLOC_SIZE
12539#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12540 info->gen = objspace->profile.count;
12541 info->file = ruby_malloc_info_file;
12542 info->line = info->file ? ruby_malloc_info_line : 0;
12551#if defined(__GNUC__) && RUBY_DEBUG
12552#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12555#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12556# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12559#define GC_MEMERROR(...) \
12560 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12562#define TRY_WITH_GC(siz, expr) do { \
12563 const gc_profile_record_flag gpr = \
12564 GPR_FLAG_FULL_MARK | \
12565 GPR_FLAG_IMMEDIATE_MARK | \
12566 GPR_FLAG_IMMEDIATE_SWEEP | \
12568 objspace_malloc_gc_stress(objspace); \
12570 if (LIKELY((expr))) { \
12573 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12575 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12577 else if ((expr)) { \
12581 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12582 "%"PRIdSIZE" bytes for %s", \
12588check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
12590 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12593 rb_bug("Cannot %s during GC", msg);
12603 check_malloc_not_in_gc(objspace, "malloc");
12607 size = objspace_malloc_prepare(objspace, size);
12608 TRY_WITH_GC(size, mem = malloc(size));
12609 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12610 return objspace_malloc_fixup(objspace, mem, size);
12613static inline size_t
12614xmalloc2_size(const size_t count, const size_t elsize)
12616 return size_mul_or_raise(count, elsize, rb_eArgError);
12620objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12622 check_malloc_not_in_gc(objspace, "realloc");
12626 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12633 if (new_size == 0) {
12634 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12657 objspace_xfree(objspace, ptr, old_size);
12671#if CALC_EXACT_MALLOC_SIZE
12676 old_size = info->size;
12680 old_size = objspace_malloc_size(objspace, ptr, old_size);
12682 new_size = objspace_malloc_size(objspace, mem, new_size);
12684#if CALC_EXACT_MALLOC_SIZE
12687 info->size = new_size;
12692 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12694 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12698#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12700#define MALLOC_INFO_GEN_SIZE 100
12701#define MALLOC_INFO_SIZE_SIZE 10
12702static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12703static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12704static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12705static st_table *malloc_info_file_table;
12708mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12710 const char *file = (void *)key;
12711 const size_t *data = (void *)val;
12713 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12715 return ST_CONTINUE;
12720rb_malloc_info_show_results(void)
12724 fprintf(stderr, "* malloc_info gen statistics\n");
12725 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12726 if (i == MALLOC_INFO_GEN_SIZE-1) {
12727 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12730 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12734 fprintf(stderr, "* malloc_info size statistics\n");
12735 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12737 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12739 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12741 if (malloc_info_file_table) {
12742 fprintf(stderr, "* malloc_info file statistics\n");
12743 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12748rb_malloc_info_show_results(void)
12754objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12763#if CALC_EXACT_MALLOC_SIZE
12766 old_size = info->size;
12768#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12770 int gen = (int)(objspace->profile.count - info->gen);
12771 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12774 malloc_info_gen_cnt[gen_index]++;
12775 malloc_info_gen_size[gen_index] += info->size;
12777 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12778 size_t s = 16 << i;
12779 if (info->size <= s) {
12780 malloc_info_size[i]++;
12784 malloc_info_size[i]++;
12788 st_data_t key = (st_data_t)info->file, d;
12791 if (malloc_info_file_table == NULL) {
12792 malloc_info_file_table = st_init_numtable_with_size(1024);
12794 if (st_lookup(malloc_info_file_table, key, &d)) {
12796 data = (
size_t *)d;
12799 data = malloc(xmalloc2_size(2,
sizeof(
size_t)));
12800 if (data == NULL) rb_bug(
"objspace_xfree: can not allocate memory");
12801 data[0] = data[1] = 0;
12802 st_insert(malloc_info_file_table, key, (st_data_t)data);
12805 data[1] += info->size;
12807 if (0 && gen >= 2) {
12809 fprintf(stderr,
"free - size:%"PRIdSIZE
", gen:%d, pos: %s:%"PRIdSIZE
"\n",
12810 info->size, gen, info->file, info->line);
12813 fprintf(stderr,
"free - size:%"PRIdSIZE
", gen:%d\n",
12820 old_size = objspace_malloc_size(objspace, ptr, old_size);
12822 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12825 RB_DEBUG_COUNTER_INC(heap_xfree);
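With CALC_EXACT_MALLOC_SIZE, every xmalloc'd block carries a malloc_obj_info header in front of the payload, and objspace_xfree steps back over it to recover the exact size. A self-contained sketch of that header-prefix technique, with an invented block_header type rather than the real malloc_obj_info layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented header type; gc.c also records file/line/gen here when
 * USE_GC_MALLOC_OBJ_INFO_DETAILS is enabled. */
struct block_header {
    size_t size;
};

static void *
sized_alloc(size_t size)
{
    struct block_header *h = malloc(sizeof(*h) + size);
    if (h == NULL) return NULL;
    h->size = size;
    return h + 1;               /* hand out the payload, not the header */
}

static void
sized_free(void *payload)
{
    if (payload == NULL) return;
    struct block_header *h = (struct block_header *)payload - 1;
    printf("freeing %zu payload bytes\n", h->size);
    free(h);                    /* free the original malloc'd pointer */
}

int
main(void)
{
    char *p = sized_alloc(32);
    if (p) {
        memset(p, 0, 32);
        sized_free(p);
    }
    return 0;
}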
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

static void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return ruby_xmalloc0(size);
}

void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

static void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}
static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    if (UNLIKELY(malloc_during_gc_p(objspace))) {
        rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
#if RGENGC_CHECK_MODE || RUBY_DEBUG
        rb_bug("Cannot calloc during GC");
#endif
    }

    void *mem;
    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}

static void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}
#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

static void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

static void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (LIKELY(x)) {
        /* fall back to ruby_mimfree() when the VM is already gone, e.g. when a
         * pthread destructor frees memory after ruby_vm_destruct */
        if (LIKELY(GET_VM())) {
            objspace_xfree(&rb_objspace, x, size);
        }
        else {
            ruby_mimfree(x);
        }
    }
}

void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}
void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xcalloc(w, 1);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
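These *_mul_add helpers compute x * y + z (and x * y + z * w) with overflow checks before allocating, which is exactly the shape of a "fixed header plus n elements" layout. A hedged extension-side sketch (the struct and the sample_buffer_new/free names are invented for illustration; the declarations are assumed to be visible through ruby.h), meant to be built as part of a C extension rather than a stand-alone program:

#include <ruby.h>

/* Invented example type: a fixed header followed by n double samples. */
struct sample_buffer {
    long count;
    double samples[];           /* flexible array member */
};

static struct sample_buffer *
sample_buffer_new(long n)
{
    /* n * sizeof(double) + sizeof(struct sample_buffer), overflow-checked;
     * an overflowing size raises ArgumentError instead of wrapping. */
    struct sample_buffer *buf =
        rb_xmalloc_mul_add((size_t)n, sizeof(double), sizeof(struct sample_buffer));
    buf->count = n;
    return buf;
}

static void
sample_buffer_free(struct sample_buffer *buf)
{
    ruby_xfree(buf);
}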
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
#if CALC_EXACT_MALLOC_SIZE
    /* ... prepend a zeroed malloc_obj_info header (plus file/line/gen fields
     *     when USE_GC_MALLOC_OBJ_INFO_DETAILS is enabled) ... */
#endif
    return mem;
}

void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    /* ... step back over the malloc_obj_info header ... */
#endif
    free(ptr);
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    VALUE imemo;

    /* allocate the empty imemo first so a failing xmalloc cannot leak the buffer */
    imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
    *store = imemo;
    ptr = ruby_xmalloc0(size);
    /* ... publish ptr and cnt into the imemo so the GC marks and later frees it ... */
    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t *)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
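rb_alloc_tmp_buffer and rb_free_tmp_buffer are the backends of the public ALLOCV family: the VALUE handle keeps the buffer reachable by the GC until it is explicitly released. A hedged extension-side sketch (sum_floats is an invented method; roughly, small requests come from alloca and larger ones from the heap path shown above):

#include <ruby.h>

/* Sum a Ruby array of numbers through a GC-tracked temporary C buffer. */
static VALUE
sum_floats(VALUE self, VALUE ary)
{
    long n = RARRAY_LEN(ary);
    VALUE tmp;
    double *buf = ALLOCV_N(double, tmp, n);   /* heap case lands in rb_alloc_tmp_buffer_with_count */
    double total = 0.0;

    for (long i = 0; i < n; i++) {
        buf[i] = NUM2DBL(rb_ary_entry(ary, i));
    }
    for (long i = 0; i < n; i++) {
        total += buf[i];
    }

    ALLOCV_END(tmp);                          /* heap case calls rb_free_tmp_buffer */
    return DBL2NUM(total);
}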
#if MALLOC_ALLOCATED_SIZE
/* GC.malloc_allocated_size and GC.malloc_allocations (debug builds only) */
static VALUE gc_malloc_allocated_size(VALUE self) { /* ... */ }
static VALUE gc_malloc_allocations(VALUE self)    { /* ... */ }
#endif

void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    unless_objspace(objspace) { return; }

    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
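rb_gc_adjust_memory_usage lets an extension report memory that Ruby's own allocators never see, so it still counts toward the GC's malloc-growth heuristics. A hedged sketch with an invented image wrapper whose pixel buffer is allocated by plain realloc (i.e. behind Ruby's back):

#include <stdlib.h>
#include <ruby.h>

struct image {
    size_t nbytes;
    unsigned char *pixels;
};

static void
image_resize(struct image *img, size_t new_nbytes)
{
    /* memory allocated outside of ruby_xmalloc/xrealloc */
    unsigned char *p = realloc(img->pixels, new_nbytes);
    if (p == NULL && new_nbytes != 0) rb_memerror();
    img->pixels = p;

    /* tell the GC how much off-heap memory this object now holds */
    rb_gc_adjust_memory_usage((ssize_t)new_nbytes - (ssize_t)img->nbytes);
    img->nbytes = new_nbytes;
}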
#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

static bool
current_process_time(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        static int try_clock_gettime = 1;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
            return true;
        }
        try_clock_gettime = 0; /* not supported: do not retry */
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            ts->tv_sec = time.tv_sec;
            ts->tv_nsec = (int32_t)time.tv_usec * 1000;
            return true;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
            ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
            ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
            return true;
        }
    }
#endif

    return false;
}
static double
getrusage_time(void)
{
    struct timespec ts;
    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
    return 0.0;
}
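getrusage_time only needs per-process CPU time as a double in seconds; the profiler prefers high-resolution sources (CLOCK_PROCESS_CPUTIME_ID, getrusage, GetProcessTimes) and falls back to zero. For comparison, a minimal stand-alone sketch using the plain standard clock() API, which is coarser but works everywhere:

#include <stdio.h>
#include <time.h>

static double
cpu_seconds(void)
{
    return (double)clock() / CLOCKS_PER_SEC;
}

int
main(void)
{
    double start = cpu_seconds();
    volatile unsigned long x = 0;
    for (unsigned long i = 0; i < 100000000UL; i++) x += i;
    printf("burned %.3f CPU seconds (x=%lu)\n", cpu_seconds() - start, (unsigned long)x);
    return 0;
}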
static inline void
gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
{
    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;
        gc_profile_record *record;

        /* create new record */
        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
        }
        if (index >= objspace->profile.size) {
            void *ptr;
            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
            if (!ptr) rb_memerror();
            objspace->profile.records = ptr;
        }
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        }
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        MEMZERO(record, gc_profile_record, 1);

        /* setup before-GC parameters */
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
#ifdef RUSAGE_SELF
        {
            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;
            }
        }
#endif
#endif
    }
}
static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}

static double
elapsed_time_from(double time)
{
    double now = getrusage_time();
    return now > time ? now - time : 0;
}

static inline void
gc_prof_timer_stop(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;
    }
}

#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}

static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}

static inline void
gc_prof_sweep_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();
        }
    }
}

static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {
        double sweep_time;
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            /* accumulate GC time for lazy sweep after gc() */
            record->gc_time += sweep_time;
        }
        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        }

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
#endif
        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
    }
}

static inline void
gc_prof_set_malloc_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;
    }
#endif
}

static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
#endif

        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
    }
}
static VALUE
gc_profile_clear(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    void *p = objspace->profile.records;
    objspace->profile.records = NULL;
    objspace->profile.size = 0;
    objspace->profile.next_index = 0;
    objspace->profile.current_record = 0;
    free(p);
    return Qnil;
}
static VALUE
gc_profile_record_get(VALUE _)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = &rb_objspace;

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i =0; i < objspace->profile.next_index; i++) {
        gc_profile_record *record = &objspace->profile.records[i];

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
#endif

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

static char *
gc_profile_dump_major_reason(unsigned int flags, char *buff)
{
    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
    int i = 0;

    if (reason == GPR_FLAG_NONE) {
        /* ... no major reason: "-" ... */
    }
    else {
#define C(x, s) \
        if (reason & GPR_FLAG_MAJOR_BY_##x) { \
            buff[i++] = #x[0]; \
            if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
            buff[i] = 0; \
        }
        /* ... C(NOFREE, N); C(OLDGEN, O); C(SHADY, S); ... */
#if RGENGC_ESTIMATE_OLDMALLOC
        /* ... C(OLDMALLOC, M); ... */
#endif
#undef C
    }
    return buff;
}
#endif
static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count) {
        size_t i;
        const gc_profile_record *record;

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
                          "More detail.\n" \
                          "Prepare Time = Previously GC's rest sweep time\n"
                          "Index Flags          Allocate Inc.  Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
                          "  Allocated Size"
#endif
                          "  Use Page     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
#if RGENGC_PROFILE
                          " OldgenObj RemNormObj RemShadObj"
#endif
#if GC_PROFILE_DETAIL_MEMORY
                          " MaxRSS(KB) MinorFLT MajorFLT"
#endif
                          "\n";
        append(out, rb_str_new_cstr(str));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %15"PRIuSIZE
#endif
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if RGENGC_PROFILE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   "%11ld %8ld %8ld"
#endif
                                   "\n",
                                   i+1,
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,
                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
#if RGENGC_PROFILE
                                   ,
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   ,
                                   record->maxrss / 1024,
                                   record->minflt,
                                   record->majflt
#endif
                       ));
        }
#endif
    }
}
static VALUE
gc_profile_result(VALUE _)
{
    VALUE str = rb_str_buf_new(0);
    gc_profile_dump_on(str, rb_str_buf_append);
    return str;
}

static VALUE
gc_profile_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
    gc_profile_dump_on(out, rb_io_write);
    return Qnil;
}

static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t i;
        size_t count = objspace->profile.next_index;

        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;
        }
    }
    return DBL2NUM(time);
}

static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return RBOOL(objspace->profile.run);
}

static VALUE
gc_profile_enable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;
    return Qnil;
}

static VALUE
gc_profile_disable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
    return Qnil;
}
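These functions back the Ruby-level GC::Profiler methods (enable, disable, result, report, total_time, clear). A hedged embedding sketch that drives them from C through the public eval API, assuming a throwaway VM created with ruby_setup:

#include <stdio.h>
#include <ruby.h>

int
main(void)
{
    int state = 0;

    if (ruby_setup()) return 1;

    rb_eval_string_protect("GC::Profiler.enable", &state);
    rb_eval_string_protect("100_000.times { Object.new }; GC.start", &state);
    rb_eval_string_protect("puts GC::Profiler.result", &state);   /* formatted by gc_profile_dump_on */
    rb_eval_string_protect("GC::Profiler.disable", &state);

    if (state) fprintf(stderr, "Ruby raised (state=%d)\n", state);
    return ruby_cleanup(state);
}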
#define TYPE_NAME(t) case (t): return #t;
        /* ... one TYPE_NAME(T_xxx) entry per built-in type, plus: ... */
      case T_DATA:
        if (obj && rb_objspace_data_type_name(obj)) {
            return rb_objspace_data_type_name(obj);
        }
        /* ... */

static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}
const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrset";
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}
static void
rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label &&
        !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
        VALUE path = rb_iseq_path(iseq);
        int n = ISEQ_BODY(iseq)->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
                 RSTRING_PTR(path), n);
    }
}

static int
str_len_no_raise(VALUE str)
{
    long len = RSTRING_LEN(str);
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;
    return (int)len;
}
#define BUFF_ARGS buff + pos, buff_size - pos
#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
#define APPEND_S(s) do { \
        if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
            goto end; \
        } \
        else { \
            memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
        } \
    } while (0)
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
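APPEND_F relies on snprintf reporting how many bytes it wanted to write: the running position may overshoot the buffer size, which is the signal to stop appending. A stand-alone sketch of the same append-and-bail pattern with an invented helper name (append_fmt):

#include <stdio.h>
#include <string.h>

static size_t
append_fmt(char *buff, size_t buff_size, size_t pos, const char *fmt, const char *arg)
{
    if (pos >= buff_size) return pos;           /* already exhausted */
    int n = snprintf(buff + pos, buff_size - pos, fmt, arg);
    if (n < 0) return buff_size;                /* encoding error: give up */
    return pos + (size_t)n;                     /* may exceed buff_size, meaning "truncated" */
}

int
main(void)
{
    char buff[32];
    size_t pos = 0;

    pos = append_fmt(buff, sizeof(buff), pos, "%s", "T_OBJECT");
    pos = append_fmt(buff, sizeof(buff), pos, " (%s)", "SomeVeryLongClassName");
    printf("%s%s\n", buff, pos >= sizeof(buff) ? " [truncated]" : "");
    return 0;
}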
static size_t
rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
{
    size_t pos = 0;

    if (SPECIAL_CONST_P(obj)) {
        APPEND_F("%s", obj_type_name(obj));

        if (SYMBOL_P(obj)) {
            APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
        }
        /* ... other special constants ... */
    }
    else {
        const int age = RVALUE_AGE_GET(obj);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
                     (void *)obj, age,
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),  "L"),
                     C(RVALUE_MARK_BITMAP(obj),           "M"),
                     C(RVALUE_PIN_BITMAP(obj),            "P"),
                     C(RVALUE_MARKING_BITMAP(obj),        "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     C(rb_objspace_garbage_object_p(obj), "G"),
                     obj_type_name(obj));
        }
        else {
            /* not on the GC heap: the per-object bitmaps cannot be consulted */
            APPEND_F("%p [%dXXXX] %s",
                     (void *)obj, age,
                     obj_type_name(obj));
        }

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPEND_S("(temporary internal)");
        }
        else {
            VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
            if (!NIL_P(class_path)) {
                APPEND_F("(%s)", RSTRING_PTR(class_path));
            }
        }

#if GC_DEBUG
        APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
#endif
    }
  end:
    return pos;
}
static size_t
rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
{
    if (LIKELY(pos < buff_size)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(rb_raw_obj_info);
            break;

          case T_ARRAY:
            if (ARY_SHARED_P(obj)) {
                APPEND_S("shared -> ");
                rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
            }
            else if (ARY_EMBED_P(obj)) {
                APPEND_F("[%s%s] len: %ld (embed)",
                         C(ARY_EMBED_P(obj),  "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj));
            }
            else {
                APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
                         C(ARY_EMBED_P(obj),  "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR(obj));
            }
            break;

          case T_STRING:
            if (STR_SHARED_P(obj)) {
                APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
            }
            else {
                if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
                APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
            }
            APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
            break;

          case T_SYMBOL: {
            VALUE fstr = RSYMBOL(obj)->fstr;
            ID id = RSYMBOL(obj)->id;
            if (RB_TYPE_P(fstr, T_STRING)) {
                APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
            }
            else {
                APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
            }
            break;
          }

          case T_MOVED:
            APPEND_F("-> %p", (void *)rb_gc_location(obj));
            break;

          case T_HASH:
            APPEND_F("[%c] %"PRIdSIZE,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_SIZE(obj));
            break;

          case T_CLASS:
          case T_MODULE: {
            VALUE class_path = rb_class_path_cached(obj);
            if (!NIL_P(class_path)) {
                APPEND_F("%s", RSTRING_PTR(class_path));
            }
            else {
                APPEND_S("(anon)");
            }
            break;
          }

          case T_ICLASS: {
            VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
            if (!NIL_P(class_path)) {
                APPEND_F("src:%s", RSTRING_PTR(class_path));
            }
            break;
          }

          case T_OBJECT:
            if (rb_shape_obj_too_complex(obj)) {
                size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
                APPEND_F("(too_complex) len:%zu", hash_len);
            }
            else {
                uint32_t len = ROBJECT_IV_CAPACITY(obj);

                if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
                    APPEND_F("(embed) len:%d", len);
                }
                else {
                    VALUE *ptr = ROBJECT_IVPTR(obj);
                    APPEND_F("len:%d ptr:%p", len, (void *)ptr);
                }
            }
            break;

          case T_DATA: {
            const struct rb_block *block;
            const rb_iseq_t *iseq;
            if (rb_obj_is_proc(obj) &&
                (block = vm_proc_block(obj)) != NULL &&
                (vm_block_type(block) == block_type_iseq) &&
                (iseq = vm_block_iseq(block)) != NULL) {
                rb_raw_iseq_info(BUFF_ARGS, iseq);
            }
            else if (rb_ractor_p(obj)) {
                rb_ractor_t *r = (void *)DATA_PTR(obj);
                if (r) {
                    APPEND_F("r:%d", r->pub.id);
                }
            }
            else {
                const char *const type_name = rb_objspace_data_type_name(obj);
                if (type_name) {
                    APPEND_F("%s", type_name);
                }
            }
            break;
          }

          case T_IMEMO: {
            APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));

            switch (imemo_type(obj)) {
              case imemo_ment: {
                const rb_method_entry_t *me = (const rb_method_entry_t *)obj;

                APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
                         rb_id2name(me->called_id),
                         METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC  ? "pub" :
                         METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                         METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                         METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                         METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                         me->def ? rb_method_type_name(me->def->type) : "NULL",
                         me->def ? me->def->aliased : -1,
                         (void *)me->owner,
                         (void *)me->defined_class);

                if (me->def) {
                    switch (me->def->type) {
                      case VM_METHOD_TYPE_ISEQ:
                        APPEND_S(" (iseq:");
                        rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
                        APPEND_S(")");
                        break;
                      default:
                        break;
                    }
                }
                break;
              }
              case imemo_iseq:
                rb_raw_iseq_info(BUFF_ARGS, (const rb_iseq_t *)obj);
                break;
              case imemo_callinfo: {
                const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
                APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
                         rb_id2name(vm_ci_mid(ci)),
                         vm_ci_flag(ci),
                         vm_ci_argc(ci),
                         vm_ci_kwarg(ci) ? "available" : "NULL");
                break;
              }
              case imemo_callcache: {
                const struct rb_callcache *cc = (const struct rb_callcache *)obj;
                VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
                const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

                APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
                         NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                         cme ? rb_id2name(cme->called_id) : "<NULL>",
                         cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                         (void *)cme,
                         (void *)vm_cc_call(cc));
                break;
              }
              default:
                break;
            }
            break;
          }

          default:
            break;
        }
    }
  end:
    return pos;
}
size_t
rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
{
    asan_unpoisoning_object(obj) {
        size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
        pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
        if (pos >= buff_size) {} /* truncated */
    }
    return buff_size;
}

#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100

static rb_atomic_t obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

/* atomic fetch-add that wraps the shared counter back into [0, maxval) */
static rb_atomic_t
atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
{
    rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
    if (UNLIKELY(oldval >= maxval - 1)) {
        /* ... CAS the counter back into range so it never overflows ... */
    }
    return oldval % maxval;
}

static const char *
obj_info(VALUE obj)
{
    rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
    char *const buff = obj_info_buffers[index];
    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}

/* without the debug buffers, obj_info() degrades to just the type name */
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
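obj_info hands each caller a slot from a small static ring so several debug strings can coexist before one is overwritten. A stand-alone C11 sketch of that rotating-buffer trick (gc.c claims slots with its own atomic_inc_wraparound over rb_atomic_t; this sketch just uses atomic_fetch_add modulo the ring size):

#include <stdatomic.h>
#include <stdio.h>

#define RING_NUM  10
#define RING_SIZE 0x100

static char ring[RING_NUM][RING_SIZE];
static atomic_uint ring_index = 0;

static const char *
debug_snprintf(const char *fmt, int value)
{
    unsigned int i = atomic_fetch_add(&ring_index, 1) % RING_NUM;  /* claim the next slot */
    snprintf(ring[i], RING_SIZE, fmt, value);
    return ring[i];
}

int
main(void)
{
    const char *a = debug_snprintf("obj#%d", 1);
    const char *b = debug_snprintf("obj#%d", 2);
    printf("%s / %s\n", a, b);   /* each call got its own buffer */
    return 0;
}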
const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}

void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}
void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);

    if (BUILTIN_TYPE(obj) == T_MOVED) {
        fprintf(stderr, "moved?: true\n");
    }
    else {
        fprintf(stderr, "moved?: false\n");
    }
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }

    fprintf(stderr, "marked?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age?         : %d\n", RVALUE_AGE_GET(obj));
    fprintf(stderr, "old?         : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(obj) ? "done" : "not yet");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}

static VALUE
gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
{
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}
void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    /* ... attach gcdebug_sentinel() as a finalizer on obj ... */
}

#if GC_DEBUG_STRESS_TO_CLASS
/* GC.add_stress_to_class(class, ...) */
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        set_stress_to_class(rb_ary_hidden_new(argc));
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}

/* GC.remove_stress_to_class(class, ...) */
static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        if (RARRAY_LEN(stress_to_class) == 0) {
            set_stress_to_class(0);
        }
    }
    return Qnil;
}
#endif
void
Init_GC(void)
{
    malloc_offset = gc_compute_malloc_offset();

    VALUE rb_mObjSpace;
    VALUE rb_mProfiler;
    VALUE gc_constants;

    rb_mGC = rb_define_module("GC");

    gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
    if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
        rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
    }
    OBJ_FREEZE(gc_constants);
    /* Internal constants of the garbage collector, exposed as GC::INTERNAL_CONSTANTS. */
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    /* ... GC, GC::Profiler and ObjectSpace method registrations elided in this listing ... */

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");

#if MALLOC_ALLOCATED_SIZE
    /* ... GC.malloc_allocated_size / GC.malloc_allocations ... */
#endif

    if (GC_COMPACTION_SUPPORTED) {
        /* ... compaction-related methods ... */
    }

    if (GC_DEBUG_STRESS_TO_CLASS) {
        /* ... GC.add_stress_to_class / GC.remove_stress_to_class ... */
    }

    /* GC build options, exposed as GC::OPTS */
    {
        VALUE opts;
        rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
        /* ... */
        OPT(RGENGC_CHECK_MODE);
        OPT(RGENGC_PROFILE);
        OPT(RGENGC_ESTIMATE_OLDMALLOC);
        OPT(GC_PROFILE_MORE_DETAIL);
        OPT(GC_ENABLE_LAZY_SWEEP);
        OPT(CALC_EXACT_MALLOC_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE_CHECK);
        OPT(GC_PROFILE_DETAIL_MEMORY);
        OPT(GC_COMPACTION_SUPPORTED);
#undef OPT
        OBJ_FREEZE(opts);
    }
}
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif

void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
}
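These wrappers capture the call site into globals right before forwarding to the *_body functions, so the per-allocation header can record where an allocation came from. A stand-alone sketch of that call-site capture idiom with invented names (traced_malloc, alloc_info_file/line stand in for ruby_malloc_info_file/line):

#include <stdio.h>
#include <stdlib.h>

static const char *alloc_info_file;
static int         alloc_info_line;

static void *
traced_malloc_body(size_t size)
{
    printf("malloc(%zu) from %s:%d\n", size, alloc_info_file, alloc_info_line);
    return malloc(size);
}

/* The macro records __FILE__/__LINE__ of the *caller*, then forwards. */
#define traced_malloc(size) \
    (alloc_info_file = __FILE__, alloc_info_line = __LINE__, traced_malloc_body(size))

int
main(void)
{
    void *p = traced_malloc(64);
    free(p);
    return 0;
}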