#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc
#undef rb_data_object_wrap
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
#  include RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#if defined _WIN32 || defined __CYGWIN__
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#if defined(_MSC_VER) && defined(_WIN64)
#pragma intrinsic(_umul128)
#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
    p = __builtin_mul_overflow(x, y, &z);
#elif defined(DSIZE_T)
#elif defined(_MSC_VER) && defined(_WIN64)
    unsigned __int64 dz = _umul128(x, y, &dp);
#elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
    struct optional t = size_mul_overflow(x, y);
    struct optional t = size_mul_overflow(x, y);
    struct optional u = size_mul_overflow(z, w);
size_mul_or_raise(size_t x, size_t y, VALUE exc)
    struct optional t = size_mul_overflow(x, y);
    return size_mul_or_raise(x, y, exc);
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    struct optional t = size_mul_add_overflow(x, y, z);
    return size_mul_add_or_raise(x, y, z, exc);
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
    struct optional t = size_mul_add_mul_overflow(x, y, z, w);
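/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to combine the overflow-checking helpers above. The helper
 * computes x * y + z and raises `exc` instead of wrapping silently.
 * `example_checked_alloc` and the use of rb_eArgError are hypothetical.
 */
#if 0
static void *
example_checked_alloc(size_t count, size_t elem_size, size_t header_size)
{
    /* total = count * elem_size + header_size, overflow-checked */
    size_t total = size_mul_add_or_raise(count, elem_size, header_size, rb_eArgError);
    return ruby_xmalloc(total);
}
#endif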
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
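/*
 * Note (added for exposition): these compile-time defaults are normally
 * tuned at boot through environment variables with a RUBY_ prefix, e.g.
 * RUBY_GC_HEAP_INIT_SLOTS, RUBY_GC_HEAP_GROWTH_FACTOR, RUBY_GC_MALLOC_LIMIT.
 * An illustrative invocation (values are examples only):
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.2 ruby app.rb
 */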
#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#define RGENGC_DEBUG 0
#ifdef RGENGC_CHECK_MODE
#undef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define RGENGC_PROFILE 0
#define RGENGC_ESTIMATE_OLDMALLOC 0
#define RGENGC_FORCE_MAJOR_GC 0
#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0
#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#if RGENGC_ESTIMATE_OLDMALLOC
#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;
    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t allocate_increase;
    size_t allocate_limit;
    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#if defined(_MSC_VER) || defined(__CYGWIN__)
#if defined(_MSC_VER) || defined(__CYGWIN__)
#define popcount_bits rb_popcount_intptr
#define STACK_CHUNK_SIZE 500
#if GC_ENABLE_INCREMENTAL_MARK
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_PROFILE_MORE_DETAIL
#if RGENGC_PROFILE > 0
    size_t total_generated_normal_object_count;
    size_t total_generated_shady_object_count;
    size_t total_shade_operation_count;
    size_t total_promoted_count;
    size_t total_remembered_normal_object_count;
    size_t total_remembered_shady_object_count;
#if RGENGC_PROFILE >= 2
    size_t generated_normal_object_count_types[RUBY_T_MASK];
    size_t generated_shady_object_count_types[RUBY_T_MASK];
    size_t remembered_normal_object_count_types[RUBY_T_MASK];
    size_t remembered_shady_object_count_types[RUBY_T_MASK];
#if RGENGC_ESTIMATE_OLDMALLOC
#if RGENGC_CHECK_MODE >= 2
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_DEBUG_STRESS_TO_CLASS
#define HEAP_PAGE_ALIGN_LOG 14
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH)
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
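/*
 * Illustrative sketch (not part of the original file): how an object
 * pointer resolves to one bit in its page's mark bitmap using only the
 * macros above. `example_marked_p` is a hypothetical name.
 */
#if 0
static int
example_marked_p(VALUE obj)
{
    /* NUM_IN_PAGE(obj) is the slot index within the aligned page body;
     * BITMAP_INDEX/BITMAP_OFFSET split it into a word index and a bit. */
    bits_t *bits = GET_HEAP_MARK_BITS(obj);
    return MARKED_IN_BITMAP(bits, obj) != 0;
}
#endif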
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)
#define ruby_initial_gc_stress gc_params.gc_stress
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length objspace->heap_pages.sorted_length
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
#define global_list objspace->global_list
#define ruby_gc_stressful objspace->flags.gc_stressful
#define ruby_gc_stress_mode objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class objspace->stress_to_class
#define stress_to_class 0
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#define is_full_marking(objspace) TRUE
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define is_incremental_marking(objspace) FALSE
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define will_be_incremental_marking(objspace) FALSE
#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
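/*
 * Worked example (added for exposition): on SIZEOF_LONG == SIZEOF_VOIDP
 * builds, RVALUE slots are aligned so the low bit of a heap pointer is 0.
 * Setting FIXNUM_FLAG therefore encodes the address as a Fixnum id, and
 * xor-ing the flag back out recovers the address:
 *
 *   VALUE id  = nonspecial_obj_id(obj);   // obj | FIXNUM_FLAG
 *   VALUE ref = obj_id_to_ref(id);        // id ^ FIXNUM_FLAG == obj
 */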
#define RANY(o) ((RVALUE*)(o))
#define RZOMBIE(o) ((struct RZombie *)(o))
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
NORETURN(static void negative_size_allocation_error(const char *));
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static void gc_marks_step(rb_objspace_t *objspace, int slots);
static void shrink_stack_chunk_cache(mark_stack_t *stack);
static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
    } \
} while (0)
#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
static const char *obj_info(VALUE obj);
#define PUSH_MARK_FUNC_DATA(v) do { \
    struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
    objspace->mark_func_data = (v);
#define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long long val = __builtin_ppc_get_timebase();
#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"
#define PRItick "llu"
typedef double tick_t;
#define PRItick "4.9f"
    return getrusage_time();
#error "choose tick type"
#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
#define MEASURE_LINE(expr) expr
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
#define RVALUE_OLD_AGE 3
#define RVALUE_AGE_SHIFT 5
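/*
 * Illustrative sketch (not part of the original file): the age counter is
 * kept in two flag bits starting at RVALUE_AGE_SHIFT; once it reaches
 * RVALUE_OLD_AGE (3) the object is treated as old generation.
 */
#if 0
static int
example_age_of(VALUE flags)
{
    return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
}
#endif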
RVALUE_FLAGS_AGE(VALUE flags)
check_rvalue_consistency_force(const VALUE obj, int terminate)
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
            fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n", (void *)obj, (void *)page);
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
        if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n", obj_info(obj), age);
            fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n", obj_info(obj), age);
            fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);
    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
        asan_poison_object(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
#if RGENGC_CHECK_MODE || GC_DEBUG
    check_rvalue_consistency(obj);
#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    int age = RVALUE_FLAGS_AGE(flags);
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    RVALUE_DEMOTE_RAW(objspace, obj);
    if (RVALUE_MARKED(obj)) {
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
    return RVALUE_MARKED(obj) == FALSE;
        rb_bug("lazy sweeping underway when freeing object space");
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
        heap_pages_expand_sorted_to(objspace, next_length);
heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
    heap_pages_expand_sorted(objspace);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#if GC_ENABLE_INCREMENTAL_MARK
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
static void rb_aligned_free(void *ptr);
        heap_unlink_page(objspace, heap_tomb, page);
        heap_page_free(objspace, page);
    if (page_body == 0) {
    page = calloc1(sizeof(struct heap_page));
        rb_aligned_free(page_body);
    end = start + limit;
        mid = (lo + hi) / 2;
    for (p = start; p != end; p++) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        heap_unlink_page(objspace, heap_tomb, page);
    const char *method = "recycle";
    page = heap_page_resurrect(objspace);
        page = heap_page_allocate(objspace);
        method = "allocate";
    if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
    struct heap_page *page = heap_page_create(objspace);
    heap_add_page(objspace, heap, page);
    heap_add_freepage(heap, page);
    heap_allocatable_pages_set(objspace, add);
    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, heap);
    if (goal_ratio == 0.0) {
        if (f < 1.0) f = 1.1;
        next_used = (size_t)(f * used);
                    " G(%1.2f), f(%1.2f),"
                    goal_ratio, f, used, next_used);
    if (next_used > max_used) next_used = max_used;
    return next_used - used;
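/*
 * Worked example (added for exposition): with used = 100 pages, a growth
 * factor of 1.8 and no max cap, next_used = (size_t)(1.8 * 100) = 180, so
 * this function returns 80 additional pages; when a maximum is configured,
 * next_used is first clamped to max_used, hence the final
 * `return next_used - used;`.
 */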
heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
    size_t next_used_limit = used + additional_pages;
    heap_allocatable_pages_set(objspace, next_used_limit - used);
    gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
        heap_assign_page(objspace, heap);
        gc_sweep_continue(objspace, heap);
        gc_marks_continue(objspace, heap);
    heap_prepare(objspace, heap);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    asan_unpoison_object((VALUE)p, true);
    asan_unpoison_object((VALUE)p, true);
    asan_unpoison_object((VALUE)p, true);
        p = heap_get_freeobj_from_next_freepage(objspace, heap);
    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
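/*
 * Illustrative sketch (not part of the original file): these hooks surface
 * as internal events that a C extension can observe with the TracePoint
 * C API. Allocating Ruby objects inside such a handler is not allowed.
 */
#if 0
#include <ruby/debug.h>

static void
example_gc_start_cb(VALUE tpval, void *data)
{
    fprintf(stderr, "GC start\n");  /* no allocation in here */
}

static void
example_install_hook(void)
{
    VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_GC_START,
                                 example_gc_start_cb, NULL);
    rb_tracepoint_enable(tp);
}
#endif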
#if !__has_feature(memory_sanitizer)
#if RGENGC_CHECK_MODE
        if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
        if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
    if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
    objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
    objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
#if RGENGC_OLD_NEWOBJ_CHECK > 0
        if (--newobj_cnt == 0) {
            gc_mark_set(objspace, obj);
            RVALUE_AGE_SET_OLD(objspace, obj);
    check_rvalue_consistency(obj);
        rb_bug("object allocation during garbage collection phase");
#if GC_DEBUG_STRESS_TO_CLASS
        for (i = 0; i < cnt; ++i) {
    return wb_protected ?
#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
#undef rb_data_object_alloc
#undef rb_data_typed_object_alloc
    if (ptr && type->function.dsize) {
        return type->function.dsize(ptr);
    register size_t hi, lo, mid;
        mid = (lo + hi) / 2;
        if (page->start <= p) {
free_const_entry_i(VALUE value, void *data)
        rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
        rb_bug("obj_free() called for broken object");
        obj_free_object_id(objspace, obj);
#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
    CHECK(RVALUE_WB_UNPROTECTED);
    CHECK(RVALUE_MARKED);
    CHECK(RVALUE_MARKING);
    CHECK(RVALUE_UNCOLLECTIBLE);
#if USE_DEBUG_COUNTER
        if (RANY(obj)->as.regexp.ptr) {
            int free_immediately = FALSE;
            void (*dfree)(void *);
                dfree = RANY(obj)->as.typeddata.type->function.dfree;
                if (0 && free_immediately == 0) {
                dfree = RANY(obj)->as.data.dfree;
            else if (free_immediately) {
                make_zombie(objspace, obj, dfree, data);
        if (RANY(obj)->as.match.rmatch) {
#if USE_DEBUG_COUNTER
        if (RANY(obj)->as.file.fptr) {
            make_io_zombie(objspace, obj);
        make_zombie(objspace, obj, 0, 0);
#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
static const struct st_hash_type object_id_hash_type = {
#if RGENGC_ESTIMATE_OLDMALLOC
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
        pstart = page->start;
        if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
objspace_each_objects_protected(VALUE arg)
incremental_enable(VALUE _)
    if (prev_dont_incremental) {
    asan_unpoison_object(obj, false);
    bool used_p = p->as.basic.flags;
        if (!p->as.basic.klass) break;
        if (!p->as.basic.klass) break;
    if (ptr || ! used_p) {
        asan_poison_object(obj);
    return internal_object_p(obj);
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
    for (; p != pend; p++) {
        if (!internal_object_p(v)) {
    return os_obj_of(of);
should_be_callable(VALUE block)
    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);
    table = (VALUE)data;
    for (i = 0; i < len; i++) {
    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);
    table = (VALUE)data;
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    rb_set_errinfo(saved.errinfo))
    saved.cfp = ec->cfp;
    for (i = saved.finished;
         saved.finished = ++i) {
#undef RESTORE_FINALIZER
    run_finalizer(objspace, zombie, (VALUE)table);
        asan_unpoison_object(zombie, false);
        next_zombie = RZOMBIE(zombie)->next;
        run_final(objspace, zombie);
            obj_free_object_id(objspace, zombie);
        RZOMBIE(zombie)->basic.flags = 0;
        heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
        zombie = next_zombie;
        finalize_list(objspace, zombie);
gc_finalize_deferred(void *dmy)
    finalize_deferred(objspace);
        rb_bug("gc_finalize_deferred_register: can't register finalizer.");
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
    finalize_deferred(objspace);
            run_finalizer(objspace, curr->obj, curr->table);
    gc_enter(objspace, "rb_objspace_call_finalizer");
            void *poisoned = asan_poisoned_object_p((VALUE)p);
            asan_unpoison_object((VALUE)p, false);
                p->as.free.flags = 0;
                    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
                else if (RANY(p)->as.data.dfree) {
                    make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
                if (RANY(p)->as.file.fptr) {
                    make_io_zombie(objspace, (VALUE)p);
            asan_poison_object((VALUE)p);
    gc_exit(objspace, "rb_objspace_call_finalizer");
    if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
        is_swept_object(objspace, ptr) ||
    if (!is_garbage_object(objspace, ptr)) {
    check_rvalue_consistency(obj);
    return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
    return is_garbage_object(objspace, obj);
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
    if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
        is_live_object(objspace, orig)) {
    return id2ref(objid);
#if SIZEOF_LONG == SIZEOF_VOIDP
        return get_heap_object_id(obj);
    return rb_find_object_id(obj, nonspecial_obj_id_);
    return rb_find_object_id(obj, cached_object_id);
obj_memsize_of(VALUE obj, int use_all_types)
            rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
    return obj_memsize_of(obj, TRUE);
type_sym(size_t type)
#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
        for (; p < pend; p++) {
            void *poisoned = asan_poisoned_object_p((VALUE)p);
            asan_unpoison_object((VALUE)p, false);
                asan_poison_object((VALUE)p);
gc_setup_mark_bits(struct heap_page *page)
    int empty_slots = 0, freed_slots = 0, final_slots = 0;
    RVALUE *p, *pend, *offset;
    gc_report(2, objspace, "page_sweep: start.\n");
            asan_unpoison_object((VALUE)p, false);
                gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if USE_RGENGC && RGENGC_CHECK_MODE
                if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
                if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
                if (obj_free(objspace, (VALUE)p)) {
                    heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
                    gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
                asan_poison_object((VALUE)p);
    gc_setup_mark_bits(sweep_page);
#if GC_PROFILE_MORE_DETAIL
        record->removing_objects += final_slots + freed_slots;
        record->empty_objects += empty_slots;
    if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
    sweep_page->free_slots = freed_slots + empty_slots;
        gc_finalize_deferred_register(objspace);
    gc_report(2, objspace, "page_sweep: end.\n");
    return freed_slots + empty_slots;
    heap_set_increment(objspace, 1);
    if (!heap_increment(objspace, heap)) {
gc_mode_name(enum gc_mode mode)
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
#if RGENGC_CHECK_MODE
    switch (prev_mode) {
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
#if GC_ENABLE_INCREMENTAL_MARK
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
    gc_sweep_start_heap(objspace, heap_eden);
    gc_report(1, objspace, "gc_sweep_finish\n");
    gc_prof_set_heap_info(objspace);
    heap_pages_free_unused_pages(objspace);
    if (heap_allocatable_pages < heap_tomb->total_pages) {
        heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
    int unlink_limit = 3;
#if GC_ENABLE_INCREMENTAL_MARK
    gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
    gc_report(2, objspace, "gc_sweep_step\n");
#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_start(objspace);
        int free_slots = gc_page_sweep(objspace, heap, sweep_page);
            heap_unlink_page(objspace, heap, sweep_page);
            heap_add_page(objspace, heap_tomb, sweep_page);
#if GC_ENABLE_INCREMENTAL_MARK
            if (heap_add_poolpage(objspace, heap, sweep_page)) {
                heap_add_freepage(heap, sweep_page);
            heap_add_freepage(heap, sweep_page);
        gc_sweep_finish(objspace);
#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_stop(objspace);
        gc_sweep_step(objspace, heap);
    gc_enter(objspace, "sweep_continue");
        gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
    gc_sweep_step(objspace, heap);
    gc_exit(objspace, "sweep_continue");
    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);
        gc_sweep_start(objspace);
        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);
        gc_sweep_start(objspace);
    gc_heap_prepare_minimum_pages(objspace, heap_eden);
stack_chunk_alloc(void)
        chunk = chunk->next;
    stack->cache = chunk;
    chunk = stack->cache;
    next = stack->cache;
        next = stack_chunk_alloc();
    stack->chunk = next;
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    while (chunk != NULL) {
        push_mark_stack_chunk(stack);
    if (is_mark_stack_empty(stack)) {
        if (stack->index == 1) {
            pop_mark_stack_chunk(stack);
#if GC_ENABLE_INCREMENTAL_MARK
    for (i=0; i<limit; i++) {
    int limit = stack->index;
        if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
        chunk = chunk->next;
        limit = stack->limit;
    rb_bug("invalid_mark_stack: unreachable");
    for (i=0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
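/*
 * Note (added for exposition): the mark stack is a linked list of
 * fixed-size chunks (STACK_CHUNK_SIZE slots each). A push that overflows
 * the current chunk links in a fresh one; a pop that drains it returns to
 * the previous chunk. The four chunks cached above avoid malloc churn
 * during marking.
 */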
#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
#define STACK_START (ec->machine.stack_start)
#define STACK_END (ec->machine.stack_end)
#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
#ifdef __EMSCRIPTEN__
#undef STACK_GROW_DIRECTION
#define STACK_GROW_DIRECTION 1
#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                      : (size_t)(STACK_END - STACK_START + 1))
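/*
 * Illustrative sketch (not part of the original file): when
 * STACK_GROW_DIRECTION is 0, the direction is probed at run time by
 * comparing a deeper frame's address against a caller's, roughly:
 */
#if 0
static int
example_stack_grow_direction(volatile VALUE *addr)
{
    VALUE end;
    return (&end < addr) ? -1 : 1;  /* deeper frame below => grows down */
}
#endif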
#if !STACK_GROW_DIRECTION
#define PREVENT_STACK_OVERFLOW 1
#ifndef PREVENT_STACK_OVERFLOW
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
# define PREVENT_STACK_OVERFLOW 1
# define PREVENT_STACK_OVERFLOW 0
#if PREVENT_STACK_OVERFLOW
    return length > maximum_length;
#define stack_check(ec, water_mark) FALSE
#define STACKFRAME_FOR_CALL_CFUNC 2048
        gc_mark_maybe(objspace, v);
    if (end <= start) return;
    mark_locations_array(objspace, start, n);
    for (i=0; i<n; i++) {
        gc_mark(objspace, values[i]);
    for (i=0; i<n; i++) {
        gc_mark_and_pin(objspace, values[i]);
    for (i=0; i<n; i++) {
            gc_mark_and_pin(objspace, values[i]);
    gc_mark_and_pin_stack_values(objspace, n, values);
    gc_mark(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark_and_pin(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
        gc_mark(objspace, RHASH(hash)->ifnone);
    switch (def->type) {
mark_method_entry_i(VALUE me, void *data)
    gc_mark(objspace, me);
mark_const_entry_i(VALUE value, void *data)
    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
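/*
 * Illustrative sketch (not part of the original file): how GET_STACK_BOUNDS
 * feeds the conservative scan. Every word between the bounds is treated as
 * a potential VALUE and handed to the maybe-mark path.
 */
#if 0
static void
example_mark_machine_stack(rb_objspace_t *objspace, const rb_execution_context_t *ec)
{
    VALUE *stack_start, *stack_end;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);
    mark_locations_array(objspace, stack_start, stack_end - stack_start);
}
#endif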
                                 const VALUE *stack_start, const VALUE *stack_end);
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;
    memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
    mark_stack_locations(objspace, ec, stack_start, stack_end);
    VALUE *stack_start, *stack_end;
    mark_stack_locations(objspace, ec, stack_start, stack_end);
                     const VALUE *stack_start, const VALUE *stack_end)
    gc_mark_locations(objspace, stack_start, stack_end);
#if defined(__mc68000__)
    gc_mark_locations(objspace,
                      (VALUE*)((char*)stack_start + 2),
                      (VALUE*)((char*)stack_end - 2));
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        asan_unpoison_object(obj, false);
            gc_mark_and_pin(objspace, obj);
            asan_poison_object(obj);
    if (RVALUE_MARKED(obj)) return 0;
#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
        if (RVALUE_WB_UNPROTECTED(obj)) {
            if (gc_remember_unprotected(objspace, obj)) {
                gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
            if (!RVALUE_OLD_P(obj)) {
                if (RVALUE_MARKED(obj)) {
                    gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_OLD(objspace, obj);
                    if (!RVALUE_MARKING(obj)) {
                        gc_grey(objspace, obj);
                    rgengc_remember(objspace, obj);
                    gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_CANDIDATE(objspace, obj);
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#if GC_ENABLE_INCREMENTAL_MARK
    check_rvalue_consistency(obj);
    if (!RVALUE_OLD_P(obj)) {
        gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
            RVALUE_AGE_INC(objspace, obj);
        RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
    check_rvalue_consistency(obj);
    rgengc_check_relation(objspace, obj);
    if (!gc_mark_set(objspace, obj)) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
    if (!is_markable_object(objspace, obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);
    if (!is_markable_object(objspace, obj)) return;
    gc_mark_ptr(objspace, obj);
        if (RVALUE_OLD_P(obj)) {
            gc_mark_values(objspace, (long)env->env_size, env->env);
            gc_mark(objspace, (VALUE)env->iseq);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
        gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
        gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
        gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
        mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
#if VM_CHECK_MODE > 0
    gc_mark_set_parent(objspace, obj);
        rb_bug("rb_gc_mark() called for broken object");
        gc_mark_imemo(objspace, obj);
    gc_mark(objspace, any->as.basic.klass);
            gc_mark(objspace, root);
            for (i=0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
        mark_hash(objspace, obj);
            if (mark_func) (*mark_func)(ptr);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
                gc_mark(objspace, ptr[i]);
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;
    while (pop_mark_stack(mstack, &obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        gc_mark_children(objspace, obj);
#if GC_ENABLE_INCREMENTAL_MARK
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
    return gc_mark_stacked_objects(objspace, TRUE, count);
    return gc_mark_stacked_objects(objspace, FALSE, 0);
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];
show_mark_ticks(void)
    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    tick_t start_tick = tick();
    const char *prev_category = 0;
    if (mark_ticks_categories[0] == 0) {
    if (categoryp) *categoryp = "xxx";
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
    prev_category = category; \
    start_tick = tick(); \
#define MARK_CHECKPOINT_PRINT_TICK(category)
#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
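/*
 * Usage note (added for exposition): gc_mark_roots brackets each root set
 * with this macro so PRINT_ROOT_TICKS can attribute marking time per
 * category, e.g. (abridged from the marking code below):
 *
 *   MARK_CHECKPOINT("vm");
 *   rb_vm_mark(vm);
 *   ...
 *   MARK_CHECKPOINT("finish");
 */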
    if (vm->self) gc_mark(objspace, vm->self);
    mark_current_machine_context(objspace, ec);
        gc_mark_maybe(objspace, *list->varptr);
#undef MARK_CHECKPOINT
#if RGENGC_CHECK_MODE >= 4
#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
static struct reflist *
    struct reflist *refs = xmalloc(sizeof(struct reflist));
    refs->list[0] = obj;
reflist_destruct(struct reflist *refs)
reflist_add(struct reflist *refs, VALUE obj)
    if (refs->pos == refs->size) {
    refs->list[refs->pos++] = obj;
reflist_dump(struct reflist *refs)
    for (i=0; i<refs->pos; i++) {
        if (IS_ROOTSIG(obj)) {
reflist_referred_from_machine_context(struct reflist *refs)
    for (i=0; i<refs->pos; i++) {
        if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
    const char *category;
allrefs_add(struct allrefs *data, VALUE obj)
    struct reflist *refs;
        reflist_add(refs, data->root_obj);
        refs = reflist_create(data->root_obj);
    struct allrefs *data = (struct allrefs *)ptr;
    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    struct allrefs *data = (struct allrefs *)ptr;
    data->root_obj = MAKE_ROOTSIG(data->category);
    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    struct allrefs data;
    struct mark_func_data_struct mfd;
    data.objspace = objspace;
    init_mark_stack(&data.mark_stack);
    mfd.mark_func = allrefs_roots_i;
    gc_mark_roots(objspace, &data.category);
    while (pop_mark_stack(&data.mark_stack, &obj)) {
    free_stack_chunks(&data.mark_stack);
    return data.references;
    struct reflist *refs = (struct reflist *)value;
    reflist_destruct(refs);
objspace_allrefs_destruct(struct st_table *refs)
    st_foreach(refs, objspace_allrefs_destruct_i, 0);
#if RGENGC_CHECK_MODE >= 5
    struct reflist *refs = (struct reflist *)v;
    fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
    struct reflist *refs = (struct reflist *)v;
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        if (reflist_referred_from_machine_context(refs)) {
            objspace->rgengc.error_count++;
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);
        if (checker_name) rb_bug("%s: GC has problem.", checker_name);
    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;
#if RGENGC_ESTIMATE_OLDMALLOC
check_generation_i(const VALUE child, void *ptr)
    if (!RVALUE_OLD_P(child)) {
        if (!RVALUE_REMEMBERED(parent) &&
            !RVALUE_REMEMBERED(child) &&
            !RVALUE_UNCOLLECTIBLE(child)) {
            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
check_color_i(const VALUE child, void *ptr)
    if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n", obj_info(parent), obj_info(child));
check_children_i(const VALUE child, void *ptr)
    if (check_rvalue_consistency_force(child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)", obj_info(child), obj_info(data->parent));
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);
            if (RVALUE_BLACK_P(obj)) {
            asan_poison_object(obj);
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;
        void *poisoned = asan_poisoned_object_p(val);
        asan_unpoison_object(val, false);
        if (RBASIC(val) == 0) free_objects++;
            has_remembered_shady = TRUE;
            has_remembered_old = TRUE;
            remembered_old_objects++;
            asan_poison_object(val);
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");
        rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
        rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
    return remembered_old_objects;
    int remembered_old_objects = 0;
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
            asan_unpoison_object((VALUE)p, false);
                fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));
            asan_poison_object((VALUE)prev);
        remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
    return remembered_old_objects;
    int remembered_old_objects = 0;
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
    return remembered_old_objects;
gc_verify_internal_consistency_m(VALUE dummy)
    objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
#if RGENGC_CHECK_MODE >= 5
        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
            fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
        size_t list_count = 0;
            rb_bug("inconsistent finalizing object count:\n"
                   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
gc_verify_transient_heap_internal_consistency(VALUE dmy)
#if GC_ENABLE_INCREMENTAL_MARK
    if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
#if GC_ENABLE_INCREMENTAL_MARK
            gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
            gc_mark_children(objspace, (VALUE)p);
    gc_mark_stacked_objects_all(objspace);
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
        heap_add_freepage(heap, page);
#if GC_ENABLE_INCREMENTAL_MARK
            heap_move_pooled_pages_to_free_pages(heap_eden);
            gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
            rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
        gc_mark_roots(objspace, 0);
            gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
        gc_marks_wb_unprotected_objects(objspace);
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#if RGENGC_CHECK_MODE >= 4
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
        if (sweep_slots > max_free_slots) {
        if (sweep_slots < min_free_slots) {
            if (!full_marking) {
                    full_marking = TRUE;
                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
                gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
                heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
                heap_increment(objspace, heap);
    gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
    if (sweep_slots < min_free_slots) {
        gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
        heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slot, total_slot));
        heap_increment(objspace, heap);
#if GC_ENABLE_INCREMENTAL_MARK
    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        if (gc_marks_finish(objspace)) {
    gc_report(1, objspace, "gc_marks_rest\n");
#if GC_ENABLE_INCREMENTAL_MARK
            while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
        } while (gc_marks_finish(objspace) == FALSE);
    gc_mark_stacked_objects_all(objspace);
    gc_marks_finish(objspace);
#if GC_ENABLE_INCREMENTAL_MARK
    gc_enter(objspace, "marks_continue");
            struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
                from = "pooled-pages";
            else if (heap_increment(objspace, heap)) {
                from = "incremented-pages";
            gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
            gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
            gc_marks_rest(objspace);
    gc_exit(objspace, "marks_continue");
    gc_prof_mark_timer_start(objspace);
        gc_marks_start(objspace, full_mark);
            gc_marks_rest(objspace);
#if RGENGC_PROFILE > 0
    gc_marks_start(objspace, TRUE);
    gc_marks_rest(objspace);
    gc_prof_mark_timer_stop(objspace);
    const char *status = " ";
    return RVALUE_REMEMBERED(obj);
    gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
              rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
    check_rvalue_consistency(obj);
        if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
#if RGENGC_PROFILE > 0
    if (!rgengc_remembered(objspace, obj)) {
        if (RVALUE_WB_UNPROTECTED(obj) == 0) {
            objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2
    return rgengc_remembersetbits_set(objspace, obj);
    int result = rgengc_remembersetbits_get(objspace, obj);
    check_rvalue_consistency(obj);
    gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
    return rgengc_remembered_sweep(objspace, obj);
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0
#if PROFILE_REMEMBERSET_MARK
    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
#if PROFILE_REMEMBERSET_MARK
                    gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
                    gc_mark_children(objspace, obj);
#if PROFILE_REMEMBERSET_MARK
#if PROFILE_REMEMBERSET_MARK
    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
    gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
    if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
    if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
    if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
    if (!rgengc_remembered(objspace, a)) {
        rgengc_remember(objspace, a);
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
    if (RVALUE_WB_UNPROTECTED(b)) {
        gc_remember_unprotected(objspace, b);
        RVALUE_AGE_SET_OLD(objspace, b);
        rgengc_remember(objspace, b);
        gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
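/*
 * Illustrative sketch (not part of the original file): the invariant this
 * barrier maintains is visible from a C extension. Any store of a
 * possibly-young object into a possibly-old one must go through
 * RB_OBJ_WRITE so the old parent gets remembered. `struct example_wrapper`
 * is hypothetical.
 */
#if 0
struct example_wrapper { VALUE child; };

static void
example_store(VALUE self, struct example_wrapper *w, VALUE child)
{
    RB_OBJ_WRITE(self, &w->child, child);  /* not: w->child = child; */
}
#endif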
#if GC_ENABLE_INCREMENTAL_MARK
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);
        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(b)) {
                gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
                RVALUE_AGE_SET_OLD(objspace, b);
                if (RVALUE_BLACK_P(b)) {
                    gc_grey(objspace, b);
                gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
                gc_remember_unprotected(objspace, b);
#define gc_writebarrier_incremental(a, b, objspace)
        if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
            gc_writebarrier_generational(a, b, objspace);
        gc_writebarrier_incremental(a, b, objspace);
    if (RVALUE_WB_UNPROTECTED(obj)) {
        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
                  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
        if (RVALUE_OLD_P(obj)) {
            gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
            RVALUE_DEMOTE(objspace, obj);
            gc_mark_set(objspace, obj);
            gc_remember_unprotected(objspace, obj);
            objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
            RVALUE_AGE_RESET(obj);
    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);
        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
static st_table *rgengc_unprotect_logging_table;
rgengc_unprotect_logging_exit_func(void)
    st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
    if (rgengc_unprotect_logging_table == 0) {
        atexit(rgengc_unprotect_logging_exit_func);
    if (RVALUE_WB_UNPROTECTED(obj) == 0) {
        snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
    if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
        if (!RVALUE_OLD_P(dest)) {
            RVALUE_AGE_RESET_RAW(dest);
            RVALUE_DEMOTE(objspace, dest);
    check_rvalue_consistency(dest);
    static ID ID_marked;
    static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
#define I(s) ID_##s = rb_intern(#s);
        if (RVALUE_WB_UNPROTECTED(obj) == 0 && n < max) flags[n++] = ID_wb_protected;
        if (RVALUE_OLD_P(obj) && n < max) flags[n++] = ID_old;
        if (RVALUE_UNCOLLECTIBLE(obj) && n < max) flags[n++] = ID_uncollectible;
    int is_old = RVALUE_OLD_P(obj);
    gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
    if (RVALUE_MARKED(obj)) {
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_ENABLE_INCREMENTAL_MARK
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
        if (tmp->varptr == addr) {
#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7149 if (!heap_increment(objspace, heap)) {
7150 heap_set_increment(objspace, 1);
7151 heap_increment(objspace, heap);
7171 gc_prof_set_malloc_info(objspace);
7202#if RGENGC_ESTIMATE_OLDMALLOC
7239#if GC_PROFILE_MORE_DETAIL
7240 objspace->profile.prepare_time = getrusage_time();
7245#if GC_PROFILE_MORE_DETAIL
7246 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
7249 return gc_start(objspace, reason);
7267#if RGENGC_CHECK_MODE >= 2
7268 gc_verify_internal_consistency(objspace);
7271 gc_enter(objspace, "gc_start");
7277 do_full_mark = TRUE;
7286 do_full_mark = TRUE;
7290 do_full_mark = TRUE;
7301#if GC_ENABLE_INCREMENTAL_MARK
7316 gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
7320#if USE_DEBUG_COUNTER
7328#if RGENGC_ESTIMATE_OLDMALLOC
7345 gc_prof_setup_new_record(objspace, reason);
7346 gc_reset_malloc_info(objspace);
7352 gc_prof_timer_start(objspace);
7354 gc_marks(objspace, do_full_mark);
7356 gc_prof_timer_stop(objspace);
7358 gc_exit(objspace, "gc_start");
7368 if (marking || sweeping) {
7369 gc_enter(objspace, "gc_rest");
7375 gc_marks_rest(objspace);
7379 gc_sweep_rest(objspace);
7381 gc_exit(objspace, "gc_rest");
7398#if GC_ENABLE_INCREMENTAL_MARK
7416 static char buff[0x10];
7417 gc_current_status_fill(objspace, buff);
7421#if PRINT_ENTER_EXIT_TICK
7423static tick_t last_exit_tick;
7424static tick_t enter_tick;
7425static int enter_count = 0;
7426static char last_gc_status[0x10];
7429gc_record(rb_objspace_t *objspace, int direction, const char *event)
7431 if (direction == 0) {
7433 enter_tick = tick();
7434 gc_current_status_fill(objspace, last_gc_status);
7437 tick_t exit_tick = tick();
7438 char current_gc_status[0x10];
7439 gc_current_status_fill(objspace, current_gc_status);
7442 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7443 enter_tick - last_exit_tick,
7444 exit_tick - enter_tick,
7446 last_gc_status, current_gc_status,
7448 last_exit_tick = exit_tick;
7451 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7453 exit_tick - enter_tick,
7455 last_gc_status, current_gc_status,
7462gc_record(rb_objspace_t *objspace, int direction, const char *event)
7477 gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
7478 gc_record(objspace, 0, event);
7488 gc_record(objspace, 1, event);
7489 gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
7496gc_with_gvl(void *ptr)
7533 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
7534 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
7535 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
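/* Editor's sketch: GC.start's keyword arguments clear bits in the
 * "reason" mask exactly as above. The same pattern standalone (the
 * flag values here are made up, not Ruby's GPR_FLAG_* constants): */
#include <stdio.h>

enum {
    FLAG_FULL_MARK       = 1 << 0,
    FLAG_IMMEDIATE_MARK  = 1 << 1,
    FLAG_IMMEDIATE_SWEEP = 1 << 2
};

int main(void)
{
    int reason = FLAG_FULL_MARK | FLAG_IMMEDIATE_MARK | FLAG_IMMEDIATE_SWEEP;
    int full_mark = 0;                /* as if GC.start(full_mark: false) */
    if (!full_mark) reason &= ~FLAG_FULL_MARK;
    printf("reason = %#x\n", reason); /* prints reason = 0x6 */
    return 0;
}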
7585 return RVALUE_MARKED(obj) && !RVALUE_PINNED(obj);
7613 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
7614 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
7615 marking = RVALUE_MARKING((VALUE)src);
7658 if (wb_unprotected) {
7665 if (uncollectible) {
7674 src->as.moved.destination = (VALUE)dest;
7675 src->as.moved.next = moved_list;
7691 if (free->slot == free->page->start + free->page->total_slots - 1) {
7693 free->page = page_list[free->index];
7730 size_t total_pages = heap_eden->total_pages;
7731 page = page_list[0];
7736 free->objspace = objspace;
7738 page = page_list[total_pages - 1];
7739 scan->index = total_pages - 1;
7759compare_pinned(const void *left, const void *right, void *dummy)
7764 left_page = *(struct heap_page * const *)left;
7765 right_page = *(struct heap_page * const *)right;
7771compare_free_slots(const void *left, const void *right, void *dummy)
7776 left_page = *(struct heap_page * const *)left;
7777 right_page = *(struct heap_page * const *)right;
7787 size_t total_pages = heap_eden->total_pages;
7793 page_list[i++] = page;
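/* Editor's sketch: the two comparators above order the page list before
 * compaction, either by pinned-slot count or by free-slot count. The
 * same double-pointer comparator shape over a hypothetical page struct,
 * usable with plain qsort(): */
#include <stdlib.h>

struct demo_page { int free_slots; };

static int compare_free_slots_demo(const void *left, const void *right)
{
    const struct demo_page *l = *(const struct demo_page *const *)left;
    const struct demo_page *r = *(const struct demo_page *const *)right;
    return l->free_slots - r->free_slots;  /* fuller pages sort first */
}
/* usage: qsort(page_list, total_pages, sizeof(struct demo_page *),
 *              compare_free_slots_demo); */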
7817 page_list = allocate_page_list(objspace, comparator);
7819 init_cursors(objspace, &free_cursor, &scan_cursor, page_list);
7822 while (not_met(&free_cursor, &scan_cursor)) {
7826 void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7827 asan_unpoison_object((VALUE)free_cursor.slot, false);
7829 while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
7831 if (free_slot_poison) {
7833 asan_poison_object((VALUE)free_cursor.slot);
7836 advance_cursor(&free_cursor, page_list);
7839 free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7840 asan_unpoison_object((VALUE)free_cursor.slot, false);
7844 void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7845 asan_unpoison_object((VALUE)scan_cursor.slot, false);
7850 while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {
7853 if (scan_slot_poison) {
7855 asan_poison_object((VALUE)scan_cursor.slot);
7858 retreat_cursor(&scan_cursor, page_list);
7861 scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7862 asan_unpoison_object((VALUE)scan_cursor.slot, false);
7867 if (not_met(&free_cursor, &scan_cursor)) {
7874 moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);
7880 advance_cursor(&free_cursor, page_list);
7881 retreat_cursor(&scan_cursor, page_list);
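/* Editor's sketch: the loop above is a classic two-finger compaction.
 * One cursor advances from the front hunting empty (T_NONE) slots, the
 * other retreats from the back hunting movable objects; objects jump
 * into holes until the cursors meet. The same idea over an int array,
 * with 0 standing in for an empty slot: */
#include <stdio.h>

static void two_finger_compact(int *slots, int n)
{
    int free_i = 0, scan_i = n - 1;
    while (free_i < scan_i) {
        while (free_i < scan_i && slots[free_i] != 0) free_i++;  /* find a hole */
        while (free_i < scan_i && slots[scan_i] == 0) scan_i--;  /* find an object */
        if (free_i < scan_i) {
            slots[free_i] = slots[scan_i];  /* move object into the hole */
            slots[scan_i] = 0;              /* its old slot becomes free */
        }
    }
}

int main(void)
{
    int heap[] = { 7, 0, 3, 0, 0, 9, 4 };
    two_finger_compact(heap, 7);
    for (int i = 0; i < 7; i++) printf("%d ", heap[i]); /* 7 4 3 9 0 0 0 */
    printf("\n");
    return 0;
}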
7900 for (i = 0; i < len; i++) {
7913 for (i = 0; i < len; i++) {
7924 if (gc_object_moved_p(objspace, (VALUE)*key)) {
7928 if (gc_object_moved_p(objspace, (VALUE)*value)) {
7942 if (gc_object_moved_p(objspace, (VALUE)key)) {
7946 if (gc_object_moved_p(objspace, (VALUE)value)) {
7957 if (gc_object_moved_p(objspace, (VALUE)*value)) {
7971 if (gc_object_moved_p(objspace, (VALUE)value)) {
8002 gc_update_table_refs(objspace, ptr);
8020 switch (def->type) {
8057 for (i = 0; i < n; i++) {
8071 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
8095 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
8113check_id_table_move(ID id, VALUE value, void *data)
8117 if (gc_object_moved_p(objspace, (VALUE)value)) {
8133 void *poisoned = asan_poisoned_object_p(value);
8134 asan_unpoison_object(value, false);
8141 destination = value;
8147 asan_poison_object(value);
8151 destination = value;
8158update_id_table(ID *key, VALUE *value, void *data, int existing)
8162 if (gc_object_moved_p(objspace, (VALUE)*value)) {
8178update_const_table(VALUE value, void *data)
8183 if (gc_object_moved_p(objspace, ce->value)) {
8187 if (gc_object_moved_p(objspace, ce->file)) {
8206 entry = entry->next;
8215 update_subclass_entries(objspace, ext->subclasses);
8223 gc_report(4, objspace, "update-refs: %p ->", (void *)obj);
8254 gc_ref_update_imemo(objspace, obj);
8270 gc_ref_update_array(objspace, obj);
8275 gc_ref_update_hash(objspace, obj);
8292 if (compact_func) (*compact_func)(ptr);
8299 gc_ref_update_object(objspace, obj);
8329 if (any->as.match.str) {
8350 for (i = 0; i < len; i++) {
8367 gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
8371gc_ref_update(void *vstart, void *vend, size_t stride, void *data)
8380 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
8387 for (; v != (VALUE)vend; v += stride) {
8389 void *poisoned = asan_poisoned_object_p(v);
8390 asan_unpoison_object(v, false);
8394 heap_page_add_freeobj(objspace, page, v);
8402 if (RVALUE_WB_UNPROTECTED(v)) {
8408 gc_update_object_references(objspace, v);
8413 asan_poison_object(v);
8423#define global_symbols ruby_global_symbols
8429 rb_vm_t *vm = rb_ec_vm_ptr(ec);
8431 objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
8466static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);
8469gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8477 gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
8489 return gc_compact_stats(objspace);
8493root_obj_check_moved_i(const char *category, VALUE obj, void *data)
8501reachable_object_check_moved_i(VALUE ref, void *data)
8505 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
8510heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
8513 for (; v != (VALUE)vend; v += stride) {
8518 void *poisoned = asan_poisoned_object_p(v);
8519 asan_unpoison_object(v, false);
8531 asan_poison_object(v);
8542 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
8543 objspace_each_objects(objspace, heap_check_moved_i, NULL);
8548gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8550 if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);
8557 gc_verify_internal_consistency(objspace);
8560 if (use_double_pages) {
8565 VALUE moved_list_head;
8568 if (use_toward_empty) {
8569 moved_list_head = gc_compact_heap(objspace, compare_free_slots);
8572 moved_list_head = gc_compact_heap(objspace, compare_pinned);
8576 gc_update_references(objspace);
8580 gc_check_references_for_moved(objspace);
8589 while (moved_list_head) {
8594 next_moved = RMOVED(moved_list_head)->next;
8597 RMOVED(moved_list_head)->flags = 0;
8598 RMOVED(moved_list_head)->destination = 0;
8599 RMOVED(moved_list_head)->next = 0;
8601 heap_page_add_freeobj(objspace, page, moved_list_head);
8605 heap_unlink_page(objspace, heap_eden, page);
8606 heap_add_page(objspace, heap_tomb, page);
8609 moved_list_head = next_moved;
8629 gc_verify_internal_consistency(objspace);
8655 int use_toward_empty = FALSE;
8656 int use_double_pages = FALSE;
8661 static ID keyword_ids[2];
8669 if (!keyword_ids[0]) {
8671 keyword_ids[1] = rb_intern("double_heap");
8676 use_toward_empty = TRUE;
8679 use_double_pages = TRUE;
8683 gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
8684 return gc_compact_stats(objspace);
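/* Editor's note: the fragment above parses the keyword arguments of the
 * compaction entry point in this Ruby version; from Ruby it would be
 * invoked roughly as
 *
 *   GC.verify_compaction_references(toward: :empty, double_heap: true)
 *
 * where toward: :empty selects the compare_free_slots ordering (objects
 * migrate toward emptier pages) and double_heap grows the heap before
 * compacting. */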
8699 garbage_collect(objspace, reason);
8709#if RGENGC_PROFILE >= 2
8714gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
8719 const char *type = type_name(i, 0);
8739gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
8741 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
8742 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
8743#if RGENGC_ESTIMATE_OLDMALLOC
8744 static VALUE sym_oldmalloc;
8746 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
8747 static VALUE sym_none, sym_marking, sym_sweeping;
8762 if (sym_major_by == Qnil) {
8763#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
8775#if RGENGC_ESTIMATE_OLDMALLOC
8789#define SET(name, attr) \
8790 if (key == sym_##name) \
8792 else if (hash != Qnil) \
8793 rb_hash_aset(hash, sym_##name, (attr));
8800#if RGENGC_ESTIMATE_OLDMALLOC
8804 SET(major_by, major_by);
8818 if (orig_flags == 0) {
8835 return gc_info_decode(objspace, key, 0);
8850 return gc_info_decode(objspace, arg, 0);
8879#if RGENGC_ESTIMATE_OLDMALLOC
8884 gc_stat_sym_total_generated_normal_object_count,
8885 gc_stat_sym_total_generated_shady_object_count,
8886 gc_stat_sym_total_shade_operation_count,
8887 gc_stat_sym_total_promoted_count,
8888 gc_stat_sym_total_remembered_normal_object_count,
8889 gc_stat_sym_total_remembered_shady_object_count,
8915#if RGENGC_ESTIMATE_OLDMALLOC
8924static VALUE gc_stat_compat_table;
8927setup_gc_stat_symbols(void)
8929 if (gc_stat_symbols[0] == 0) {
8930#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
8933 S(heap_sorted_length);
8935 S(heap_available_slots);
8938 S(heap_final_slots);
8939 S(heap_marked_slots);
8942 S(total_allocated_pages);
8943 S(total_freed_pages);
8944 S(total_allocated_objects);
8945 S(total_freed_objects);
8946 S(malloc_increase_bytes);
8947 S(malloc_increase_bytes_limit);
8952 S(remembered_wb_unprotected_objects);
8953 S(remembered_wb_unprotected_objects_limit);
8955 S(old_objects_limit);
8956#if RGENGC_ESTIMATE_OLDMALLOC
8957 S(oldmalloc_increase_bytes);
8958 S(oldmalloc_increase_bytes_limit);
8961 S(total_generated_normal_object_count);
8962 S(total_generated_shady_object_count);
8963 S(total_shade_operation_count);
8964 S(total_promoted_count);
8965 S(total_remembered_normal_object_count);
8966 S(total_remembered_shady_object_count);
8970#define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
8971 S(gc_stat_heap_used);
8972 S(heap_eden_page_length);
8973 S(heap_tomb_page_length);
8981 S(remembered_shady_object);
8982 S(remembered_shady_object_limit);
8984 S(old_object_limit);
8986 S(total_allocated_object);
8987 S(total_freed_object);
8990#if RGENGC_ESTIMATE_OLDMALLOC
8991 S(oldmalloc_increase);
9002#define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
9003#define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
9022#if RGENGC_ESTIMATE_OLDMALLOC
9038 if (!NIL_P(new_key)) {
9039 static int warned = 0;
9041 rb_warn("GC.stat keys were changed from Ruby 2.1. "
9043 "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
9061 if ((new_key = compat_key(key)) != Qnil) {
9069gc_stat_internal(VALUE hash_or_sym)
9074 setup_gc_stat_symbols();
9080 static VALUE default_proc_for_compat = 0;
9081 if (default_proc_for_compat == 0) {
9082 default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
9095#define SET(name, attr) \
9096 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
9098 else if (hash != Qnil) \
9099 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
9108 SET(heap_available_slots, objspace_available_slots(objspace));
9109 SET(heap_live_slots, objspace_live_slots(objspace));
9110 SET(heap_free_slots, objspace_free_slots(objspace));
9129#if RGENGC_ESTIMATE_OLDMALLOC
9135 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
9136 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
9137 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
9138 SET(total_promoted_count, objspace->profile.total_promoted_count);
9139 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
9140 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
9147 if ((new_key = compat_key(key)) != Qnil) {
9154#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
9156 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
9157 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
9158 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
9159 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
9160 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
9161 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
9175 size_t value = gc_stat_internal(arg);
9185 gc_stat_internal(arg);
9193 size_t value = gc_stat_internal(key);
9197 gc_stat_internal(key);
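/* Editor's sketch: gc_stat_internal() backs both GC.stat and the
 * C-level rb_gc_stat() declared in this file. A hypothetical extension
 * reading one counter (key name taken from the symbol table built
 * above): */
#include <ruby.h>

static size_t live_slots_now(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("heap_live_slots")));
}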
9220 gc_stress_set(objspace, flag);
9250 return gc_disable_no_rest(objspace);
9272 return gc_disable_no_rest(objspace);
9282get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
9290#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
9305 unit = 1024*1024*1024;
9309 while (*end && isspace((unsigned char)*end)) end++;
9321 if (val > 0 && (size_t)val > lower_bound) {
9325 *default_value = (size_t)val;
9331 name, val, *default_value, lower_bound);
9340get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
9348 if (!*ptr || *end) {
9353 if (accept_zero && val == 0.0) {
9356 else if (val <= lower_bound) {
9358 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
9359 name, val, *default_value, lower_bound);
9362 else if (upper_bound != 0.0 && val > upper_bound) {
9365 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
9366 name, val, *default_value, upper_bound);
9372 *default_value = val;
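/* Editor's sketch: the same strtod-with-bounds pattern used by
 * get_envparam_double above, standalone; the names are hypothetical: */
#include <stdio.h>
#include <stdlib.h>

static int env_double(const char *name, double *out, double lower_bound)
{
    const char *s = getenv(name);
    char *end;
    if (!s || !*s) return 0;
    double val = strtod(s, &end);
    if (*end) return 0;  /* trailing junk after the number: reject */
    if (val <= lower_bound) {
        fprintf(stderr, "%s=%f is ignored because it must be greater than %f.\n",
                name, val, lower_bound);
        return 0;
    }
    *out = val;
    return 1;
}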
9380gc_set_initial_pages(void)
9386 if (min_pages > heap_eden->total_pages) {
9437 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
9440 else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
9441 rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
9445 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
9446 gc_set_initial_pages();
9448 else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
9449 rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
9450 gc_set_initial_pages();
9453 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
9454 get_envparam_size("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
9463 get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
9464 get_envparam_size("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
9470#if RGENGC_ESTIMATE_OLDMALLOC
9485 if (is_markable_object(objspace, obj)) {
9486 struct mark_func_data_struct mfd;
9487 mfd.mark_func = func;
9490 gc_mark_children(objspace, obj);
9512 objspace_reachable_objects_from_root(objspace, func, passing_data);
9519 struct mark_func_data_struct mfd;
9522 data.data = passing_data;
9524 mfd.mark_func = root_objects_from;
9528 gc_mark_roots(objspace, &data.category);
9580negative_size_allocation_error(const char *msg)
9586ruby_memerror_body(void *dummy)
9623 if (during_gc) gc_exit(objspace, "rb_memerror");
9647#if defined __MINGW32__
9648 res = __mingw_aligned_malloc(size, alignment);
9650 void *_aligned_malloc(size_t, size_t);
9651 res = _aligned_malloc(size, alignment);
9652#elif defined(HAVE_POSIX_MEMALIGN)
9659#elif defined(HAVE_MEMALIGN)
9663 res = malloc(alignment + size + sizeof(void *));
9664 aligned = (char *)res + alignment + sizeof(void *);
9665 aligned -= ((VALUE)aligned & (alignment - 1));
9666 ((void **)aligned)[-1] = res;
9667 res = (void *)aligned;
9671 GC_ASSERT(((alignment - 1) & alignment) == 0);
9672 GC_ASSERT(alignment % sizeof(void *) == 0);
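/* Editor's sketch: the portable branch above over-allocates, rounds the
 * pointer up to an alignment boundary, and stashes the original
 * malloc() pointer in the word just before the aligned block so the
 * matching free can recover it. The same trick standalone: */
#include <stdint.h>
#include <stdlib.h>

static void *aligned_malloc_fallback(size_t alignment, size_t size)
{
    /* precondition (as the GC_ASSERTs above check): alignment is a
     * power of two and a multiple of sizeof(void *) */
    void *res = malloc(alignment + size + sizeof(void *));
    if (res == NULL) return NULL;
    char *aligned = (char *)res + alignment + sizeof(void *);
    aligned -= ((uintptr_t)aligned & (alignment - 1));
    ((void **)aligned)[-1] = res;  /* remember where malloc's block starts */
    return aligned;
}

static void aligned_free_fallback(void *ptr)
{
    free(((void **)ptr)[-1]);      /* recover the original pointer */
}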
9677rb_aligned_free(void *ptr)
9679#if defined __MINGW32__
9680 __mingw_aligned_free(ptr);
9683#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
9693#ifdef HAVE_MALLOC_USABLE_SIZE
9694 return malloc_usable_size(ptr);
9707atomic_sub_nounderflow(size_t *var, size_t sub)
9709 if (sub == 0) return;
9713 if (val < sub) sub = val;
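/* Editor's sketch: atomic_sub_nounderflow clamps the subtrahend so a
 * racing counter never wraps below zero. The same idea using C11
 * atomics (the original uses Ruby's ATOMIC_* macros): */
#include <stdatomic.h>
#include <stddef.h>

static void sub_nounderflow(_Atomic size_t *var, size_t sub)
{
    if (sub == 0) return;
    size_t val = atomic_load(var);
    for (;;) {
        size_t dec = sub < val ? sub : val;  /* clamp to the current value */
        if (atomic_compare_exchange_weak(var, &val, val - dec)) break;
        /* on failure, val now holds the fresh value; retry */
    }
}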
9728 garbage_collect_with_gvl(objspace, reason);
9735 if (new_size > old_size) {
9737#if RGENGC_ESTIMATE_OLDMALLOC
9743#if RGENGC_ESTIMATE_OLDMALLOC
9759#if MALLOC_ALLOCATED_SIZE
9760 if (new_size >= old_size) {
9764 size_t dec_size = old_size - new_size;
9765 size_t allocated_size = objspace->malloc_params.allocated_size;
9767#if MALLOC_ALLOCATED_SIZE_CHECK
9768 if (allocated_size < dec_size) {
9769 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
9772 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
9775 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
9780 (int)new_size, (int)old_size);
9789 if (allocations > 0) {
9790 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
9792#if MALLOC_ALLOCATED_SIZE_CHECK
9792#if MALLOC_ALLOCATED_SIZE_CHECK
9806#if USE_GC_MALLOC_OBJ_INFO_DETAILS
9813#if USE_GC_MALLOC_OBJ_INFO_DETAILS
9814const char *ruby_malloc_info_file;
9815int ruby_malloc_info_line;
9823#if CALC_EXACT_MALLOC_SIZE
9833 size = objspace_malloc_size(objspace, mem, size);
9836#if CALC_EXACT_MALLOC_SIZE
9840#if USE_GC_MALLOC_OBJ_INFO_DETAILS
9842 info->file = ruby_malloc_info_file;
9843 info->line = info->file ? ruby_malloc_info_line : 0;
9854#define TRY_WITH_GC(alloc) do { \
9855 objspace_malloc_gc_stress(objspace); \
9857 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
9858 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
9859 GPR_FLAG_MALLOC) || \
9873 size = objspace_malloc_prepare(objspace, size);
9876 return objspace_malloc_fixup(objspace, mem, size);
9880xmalloc2_size(const size_t count, const size_t elsize)
9886objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
9890 if (!ptr) return objspace_xmalloc0(objspace, new_size);
9897 if (new_size == 0) {
9898 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
9921 objspace_xfree(objspace, ptr, old_size);
9935#if CALC_EXACT_MALLOC_SIZE
9940 old_size = info->size;
9944 old_size = objspace_malloc_size(objspace, ptr, old_size);
9946 new_size = objspace_malloc_size(objspace, mem, new_size);
9948#if CALC_EXACT_MALLOC_SIZE
9951 info->size = new_size;
9962#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
9964#define MALLOC_INFO_GEN_SIZE 100
9965#define MALLOC_INFO_SIZE_SIZE 10
9966static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
9967static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
9968static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
9969static st_table *malloc_info_file_table;
9974 const char *file = (void *)key;
9975 const size_t *data = (void *)val;
9977 fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
9989 for (i = 0; i < MALLOC_INFO_GEN_SIZE; i++) {
9990 if (i == MALLOC_INFO_GEN_SIZE-1) {
9991 fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9994 fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9999 for (i = 0; i < MALLOC_INFO_SIZE_SIZE; i++) {
10005 if (malloc_info_file_table) {
10007 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
10027#if CALC_EXACT_MALLOC_SIZE
10030 old_size = info->size;
10032#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10035 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
10038 malloc_info_gen_cnt[gen_index]++;
10039 malloc_info_gen_size[gen_index] += info->size;
10041 for (i = 0; i < MALLOC_INFO_SIZE_SIZE; i++) {
10042 size_t s = 16 << i;
10043 if (info->size <= s) {
10044 malloc_info_size[i]++;
10048 malloc_info_size[i]++;
10055 if (malloc_info_file_table == NULL) {
10062 data = malloc(xmalloc2_size(2, sizeof(size_t)));
10063 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
10064 data[0] = data[1] = 0;
10068 data[1] += info->size;
10073 fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
10083 old_size = objspace_malloc_size(objspace, ptr, old_size);
10092ruby_xmalloc0(size_t size)
10101 negative_size_allocation_error("too large allocation size");
10103 return ruby_xmalloc0(size);
10125 size = objspace_malloc_prepare(objspace, size);
10127 return objspace_malloc_fixup(objspace, mem, size);
10136#ifdef ruby_sized_xrealloc
10137#undef ruby_sized_xrealloc
10143 negative_size_allocation_error("too large allocation size");
10146 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
10155#ifdef ruby_sized_xrealloc2
10156#undef ruby_sized_xrealloc2
10161 size_t len = xmalloc2_size(n, size);
10171#ifdef ruby_sized_xfree
10172#undef ruby_sized_xfree
10191 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10198 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10205 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10212 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
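/* Editor's sketch: the rb_xmalloc_mul_add family above computes buffer
 * sizes as x*y+z with overflow checking (size_mul_add_or_raise raising
 * rb_eArgError). An equivalent standalone check with the GCC/Clang
 * overflow builtins: */
#include <stdio.h>
#include <stdlib.h>

static void *xmalloc_mul_add_demo(size_t x, size_t y, size_t z)
{
    size_t t, total;
    if (__builtin_mul_overflow(x, y, &t) || __builtin_add_overflow(t, z, &total)) {
        fprintf(stderr, "allocation size overflow: %zu * %zu + %zu\n", x, y, z);
        abort();  /* the real code raises an ArgumentError instead */
    }
    return malloc(total);
}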
10223#if CALC_EXACT_MALLOC_SIZE
10227#if CALC_EXACT_MALLOC_SIZE
10236#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10252#if CALC_EXACT_MALLOC_SIZE
10268 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
10300#if MALLOC_ALLOCATED_SIZE
10311gc_malloc_allocated_size(VALUE self)
10326gc_malloc_allocations(VALUE self)
10339 else if (diff < 0) {
10354#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
10356#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10368wmap_compact(void *ptr)
10377wmap_mark(void *ptr)
10380#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10395wmap_free(void *ptr)
10412wmap_memsize(const void *ptr)
10452 if (!is_id_value(objspace, obj)) return FALSE;
10453 if (!is_live_object(objspace, obj)) return FALSE;
10461 if (!existing) return ST_STOP;
10464 if (ptr[i] != wmap) {
10491 rb_bug("wmap_finalize: objid is not found.");
10497 rids = (VALUE *)data;
10525 else if (wmap_live_p(objspace, obj)) {
10556wmap_inspect(VALUE self)
10588wmap_each(VALUE self)
10603 if (wmap_live_p(objspace, obj)) {
10611wmap_each_key(VALUE self)
10626 if (wmap_live_p(objspace, obj)) {
10634wmap_each_value(VALUE self)
10659wmap_keys(VALUE self)
10686wmap_values(VALUE self)
10710 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
10727 define_final0(orig, w->final);
10730 define_final0(wmap, w->final);
10750 if (!wmap_live_p(objspace, obj)) return Qundef;
10771wmap_size(VALUE self)
10778#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
10789#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
10793getrusage_time(void)
10795#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
10797 static int try_clock_gettime = 1;
10803 try_clock_gettime = 0;
10810 struct rusage usage;
10812 if (getrusage(RUSAGE_SELF, &usage) == 0) {
10813 time = usage.ru_utime;
10814 return time.tv_sec + time.tv_usec * 1e-6;
10821 FILETIME creation_time, exit_time, kernel_time, user_time;
10826 if (GetProcessTimes(GetCurrentProcess(),
10827 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
10828 memcpy(&ui, &user_time, sizeof(FILETIME));
10829 q = ui.QuadPart / 10L;
10830 t = (DWORD)(q % 1000000L) * 1e-6;
10836 t += (DWORD)q & ~(~0 << 16);
10847gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
10868 rb_bug("gc_profile malloc or realloc miss");
10875#if MALLOC_ALLOCATED_SIZE
10878#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
10881 struct rusage usage;
10882 if (getrusage(RUSAGE_SELF, &usage) == 0) {
10883 record->maxrss = usage.ru_maxrss;
10884 record->minflt = usage.ru_minflt;
10885 record->majflt = usage.ru_majflt;
10898#if GC_PROFILE_MORE_DETAIL
10899 record->prepare_time = objspace->profile.prepare_time;
10907elapsed_time_from(double time)
10909 double now = getrusage_time();
10928#define RUBY_DTRACE_GC_HOOK(name) \
10929 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
10934#if GC_PROFILE_MORE_DETAIL
10945#if GC_PROFILE_MORE_DETAIL
10948 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
10978 record->gc_time += sweep_time;
10984#if GC_PROFILE_MORE_DETAIL
10985 record->gc_sweep_time += sweep_time;
10995#if GC_PROFILE_MORE_DETAIL
11012#if GC_PROFILE_MORE_DETAIL
11014 record->heap_live_objects = live;
11015 record->heap_free_objects = total - live;
11033gc_profile_clear(VALUE _)
11098gc_profile_record_get(VALUE _)
11120#if GC_PROFILE_MORE_DETAIL
11135#if RGENGC_PROFILE > 0
11146#if GC_PROFILE_MORE_DETAIL
11147#define MAJOR_REASON_MAX 0x10
11150gc_profile_dump_major_reason(int flags, char *buff)
11161 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
11162 buff[i++] = #x[0]; \
11163 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
11169#if RGENGC_ESTIMATE_OLDMALLOC
11183#ifdef MAJOR_REASON_MAX
11184 char reason_str[MAJOR_REASON_MAX];
11192 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
11201#if GC_PROFILE_MORE_DETAIL
11204 "Prepare Time = Previously GC's rest sweep time\n"
11205 "Index Flags Allocate Inc. Allocate Limit"
11209 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
11211 " OldgenObj RemNormObj RemShadObj"
11214 " MaxRSS(KB) MinorFLT MajorFLT"
11234 gc_profile_dump_major_reason(record->flags, reason_str),
11241 record->allocate_increase, record->allocate_limit,
11243 record->allocated_size,
11245 record->heap_use_pages,
11246 record->gc_mark_time*1000,
11247 record->gc_sweep_time*1000,
11248 record->prepare_time*1000,
11250 record->heap_live_objects,
11251 record->heap_free_objects,
11252 record->removing_objects,
11253 record->empty_objects
11256 record->old_objects,
11257 record->remembered_normal_objects,
11258 record->remembered_shady_objects
11262 record->maxrss / 1024,
11285gc_profile_result(VALUE _)
11320gc_profile_total_time(VALUE self)
11344gc_profile_enable_get(VALUE self)
11359gc_profile_enable(VALUE _)
11376gc_profile_disable(VALUE _)
11393#define TYPE_NAME(t) case (t): return #t;
11452 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
11456# define ARY_SHARED_P(ary) \
11457 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11458 FL_TEST((ary),ELTS_SHARED)!=0)
11459# define ARY_EMBED_P(ary) \
11460 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11461 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
11464rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
11469 snprintf(buff, buff_size, " %s@%s:%d",
11481#define BUFF_ARGS buff + pos, buff_size - pos
11482#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
11494#define TF(c) ((c) != 0 ? "true" : "false")
11495#define C(c, s) ((c) != 0 ? (s) : " ")
11498 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
11508 obj_type_name(obj)));
11514 obj_type_name(obj)));
11520 obj_type_name(obj)));
11523 if (internal_object_p(obj)) {
11532 if (!NIL_P(class_path)) {
11586 if (!NIL_P(class_path)) {
11594 if (!NIL_P(class_path)) {
11616 (block = vm_proc_block(obj)) != NULL &&
11618 (iseq = vm_block_iseq(block)) != NULL) {
11630 const char *imemo_name = "\0";
11632#define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
11653 APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
11687#define OBJ_INFO_BUFFERS_NUM 10
11688#define OBJ_INFO_BUFFERS_SIZE 0x100
11689static int obj_info_buffers_index = 0;
11690static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
11695 const int index = obj_info_buffers_index++;
11696 char *const buff = &obj_info_buffers[index][0];
11698 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
11699 obj_info_buffers_index = 0;
11708 return obj_type_name(obj);
11715 return obj_info(obj);
11747 if (is_pointer_to_heap(objspace, (void *)obj)) {
11760 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
11761 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
11766 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
11776 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
11788#if GC_DEBUG_STRESS_TO_CLASS
11893 VALUE rb_mObjSpace;
11894 VALUE rb_mProfiler;
11895 VALUE gc_constants;
11959#if MALLOC_ALLOCATED_SIZE
11964#if GC_DEBUG_STRESS_TO_CLASS
11973#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
11994#ifdef ruby_xmalloc2
11995#undef ruby_xmalloc2
12000#ifdef ruby_xrealloc
12001#undef ruby_xrealloc
12003#ifdef ruby_xrealloc2
12004#undef ruby_xrealloc2
12010#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12011 ruby_malloc_info_file = __FILE__;
12012 ruby_malloc_info_line = __LINE__;
12020#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12021 ruby_malloc_info_file = __FILE__;
12022 ruby_malloc_info_line = __LINE__;
12030#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12031 ruby_malloc_info_file = __FILE__;
12032 ruby_malloc_info_line = __LINE__;
12040#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12041 ruby_malloc_info_file = __FILE__;
12042 ruby_malloc_info_line = __LINE__;
12050#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12051 ruby_malloc_info_file = __FILE__;
12052 ruby_malloc_info_line = __LINE__;