Ruby 3.3.0p0 (2023-12-25 revision 5124f9ac7513eb590c37717337c430cb93caa151)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#include <signal.h>
23
24#define sighandler_t ruby_sighandler_t
25
26#ifndef _WIN32
27#include <unistd.h>
28#include <sys/mman.h>
29#endif
30
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
34#else
35# include <setjmp.h>
36#endif
37#include <stdarg.h>
38#include <stdio.h>
39
40/* MALLOC_HEADERS_BEGIN */
41#ifndef HAVE_MALLOC_USABLE_SIZE
42# ifdef _WIN32
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
48# endif
49#endif
50
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
53/* Alternative malloc header is included in ruby/missing.h */
54# elif defined(HAVE_MALLOC_H)
55# include <malloc.h>
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
60# endif
61#endif
62
63#ifdef HAVE_MALLOC_TRIM
64# include <malloc.h>
65
66# ifdef __EMSCRIPTEN__
67/* malloc_trim is defined in emscripten/emmalloc.h on emscripten. */
68# include <emscripten/emmalloc.h>
69# endif
70#endif
71
72#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
73/* LIST_HEAD conflicts with sys/queue.h on macOS */
74# include <sys/user.h>
75#endif
76/* MALLOC_HEADERS_END */
77
78#ifdef HAVE_SYS_TIME_H
79# include <sys/time.h>
80#endif
81
82#ifdef HAVE_SYS_RESOURCE_H
83# include <sys/resource.h>
84#endif
85
86#if defined _WIN32 || defined __CYGWIN__
87# include <windows.h>
88#elif defined(HAVE_POSIX_MEMALIGN)
89#elif defined(HAVE_MEMALIGN)
90# include <malloc.h>
91#endif
92
93#include <sys/types.h>
94
95#ifdef __EMSCRIPTEN__
96#include <emscripten.h>
97#endif
98
99#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
100# include <mach/task.h>
101# include <mach/mach_init.h>
102# include <mach/mach_port.h>
103#endif
104#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
105
106#include "constant.h"
107#include "darray.h"
108#include "debug_counter.h"
109#include "eval_intern.h"
110#include "id_table.h"
111#include "internal.h"
112#include "internal/class.h"
113#include "internal/compile.h"
114#include "internal/complex.h"
115#include "internal/cont.h"
116#include "internal/error.h"
117#include "internal/eval.h"
118#include "internal/gc.h"
119#include "internal/hash.h"
120#include "internal/imemo.h"
121#include "internal/io.h"
122#include "internal/numeric.h"
123#include "internal/object.h"
124#include "internal/proc.h"
125#include "internal/rational.h"
126#include "internal/sanitizers.h"
127#include "internal/struct.h"
128#include "internal/symbol.h"
129#include "internal/thread.h"
130#include "internal/variable.h"
131#include "internal/warnings.h"
132#include "rjit.h"
133#include "probes.h"
134#include "regint.h"
135#include "ruby/debug.h"
136#include "ruby/io.h"
137#include "ruby/re.h"
138#include "ruby/st.h"
139#include "ruby/thread.h"
140#include "ruby/util.h"
141#include "ruby_assert.h"
142#include "ruby_atomic.h"
143#include "symbol.h"
144#include "vm_core.h"
145#include "vm_sync.h"
146#include "vm_callinfo.h"
147#include "ractor_core.h"
148
149#include "builtin.h"
150#include "shape.h"
151
152#define rb_setjmp(env) RUBY_SETJMP(env)
153#define rb_jmp_buf rb_jmpbuf_t
154#undef rb_data_object_wrap
155
156#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
157#define MAP_ANONYMOUS MAP_ANON
158#endif
159
160
161static size_t malloc_offset = 0;
162#if defined(HAVE_MALLOC_USABLE_SIZE)
163static size_t
164gc_compute_malloc_offset(void)
165{
166 // Different allocators use different metadata storage strategies which result in different
167 // ideal sizes.
168 // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
169 // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
170 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
171 // waste memory.
172    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it resulted in
173 // no wasted memory.
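    // Concretely, the probe loop below requests 64 bytes (offset 0), then 56
    // bytes (offset 8), then 48 bytes (offset 16), and returns the first offset
    // whose request reports no slack via malloc_usable_size(); if none of the
    // probes fits exactly, the offset falls back to 0.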
174 size_t offset = 0;
175 for (offset = 0; offset <= 16; offset += 8) {
176 size_t allocated = (64 - offset);
177 void *test_ptr = malloc(allocated);
178 size_t wasted = malloc_usable_size(test_ptr) - allocated;
179 free(test_ptr);
180
181 if (wasted == 0) {
182 return offset;
183 }
184 }
185 return 0;
186}
187#else
188static size_t
189gc_compute_malloc_offset(void)
190{
191 // If we don't have malloc_usable_size, we use powers of 2.
192 return 0;
193}
194#endif
195
196size_t
197rb_malloc_grow_capa(size_t current, size_t type_size)
198{
199 size_t current_capacity = current;
200 if (current_capacity < 4) {
201 current_capacity = 4;
202 }
203 current_capacity *= type_size;
204
205 // We double the current capacity.
206 size_t new_capacity = (current_capacity * 2);
207
208 // And round up to the next power of 2 if it's not already one.
209 if (rb_popcount64(new_capacity) != 1) {
210 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
211 }
212
213 new_capacity -= malloc_offset;
214 new_capacity /= type_size;
215 if (current > new_capacity) {
216 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
217 }
218 RUBY_ASSERT(new_capacity > current);
219 return new_capacity;
220}
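/* A worked example, assuming a 64-bit build and malloc_offset == 8 (the glibc
 * case probed above): rb_malloc_grow_capa(10, 8) computes 10 * 8 = 80 bytes,
 * doubles that to 160, rounds up to the next power of two (256), subtracts the
 * 8-byte offset to get 248, and returns 248 / 8 = 31 elements, so a subsequent
 * malloc(31 * 8) == 248-byte request should waste no allocator slack. */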
221
222static inline struct rbimpl_size_mul_overflow_tag
223size_add_overflow(size_t x, size_t y)
224{
225 size_t z;
226 bool p;
227#if 0
228
229#elif __has_builtin(__builtin_add_overflow)
230 p = __builtin_add_overflow(x, y, &z);
231
232#elif defined(DSIZE_T)
233 RB_GNUC_EXTENSION DSIZE_T dx = x;
234 RB_GNUC_EXTENSION DSIZE_T dy = y;
235 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
236 p = dz > SIZE_MAX;
237 z = (size_t)dz;
238
239#else
240 z = x + y;
241 p = z < y;
242
243#endif
244 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
245}
246
247static inline struct rbimpl_size_mul_overflow_tag
248size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
249{
250 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
251 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
252 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
253}
254
255static inline struct rbimpl_size_mul_overflow_tag
256size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
257{
258 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
259 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
260 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
261 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
262}
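/* In the rbimpl_size_mul_overflow_tag values built above, .left is the overflow
 * flag and .right is the (possibly wrapped) result; the *_or_raise helpers below
 * only hand .right back to the caller after checking .left. */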
263
264PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
265
266static inline size_t
267size_mul_or_raise(size_t x, size_t y, VALUE exc)
268{
269 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
270 if (LIKELY(!t.left)) {
271 return t.right;
272 }
273 else if (rb_during_gc()) {
274 rb_memerror(); /* or...? */
275 }
276 else {
277 gc_raise(
278 exc,
279 "integer overflow: %"PRIuSIZE
280 " * %"PRIuSIZE
281 " > %"PRIuSIZE,
282 x, y, (size_t)SIZE_MAX);
283 }
284}
285
286size_t
287rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
288{
289 return size_mul_or_raise(x, y, exc);
290}
291
292static inline size_t
293size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
294{
295 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
296 if (LIKELY(!t.left)) {
297 return t.right;
298 }
299 else if (rb_during_gc()) {
300 rb_memerror(); /* or...? */
301 }
302 else {
303 gc_raise(
304 exc,
305 "integer overflow: %"PRIuSIZE
306 " * %"PRIuSIZE
307 " + %"PRIuSIZE
308 " > %"PRIuSIZE,
309 x, y, z, (size_t)SIZE_MAX);
310 }
311}
312
313size_t
314rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
315{
316 return size_mul_add_or_raise(x, y, z, exc);
317}
318
319static inline size_t
320size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
321{
322 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
323 if (LIKELY(!t.left)) {
324 return t.right;
325 }
326 else if (rb_during_gc()) {
327 rb_memerror(); /* or...? */
328 }
329 else {
330 gc_raise(
331 exc,
332 "integer overflow: %"PRIdSIZE
333 " * %"PRIdSIZE
334 " + %"PRIdSIZE
335 " * %"PRIdSIZE
336 " > %"PRIdSIZE,
337 x, y, z, w, (size_t)SIZE_MAX);
338 }
339}
340
341#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
342/* trick the compiler into thinking an external signal handler uses this */
343volatile VALUE rb_gc_guarded_val;
344volatile VALUE *
345rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
346{
347 rb_gc_guarded_val = val;
348
349 return ptr;
350}
351#endif
352
353#ifndef GC_HEAP_INIT_SLOTS
354#define GC_HEAP_INIT_SLOTS 10000
355#endif
356#ifndef GC_HEAP_FREE_SLOTS
357#define GC_HEAP_FREE_SLOTS 4096
358#endif
359#ifndef GC_HEAP_GROWTH_FACTOR
360#define GC_HEAP_GROWTH_FACTOR 1.8
361#endif
362#ifndef GC_HEAP_GROWTH_MAX_SLOTS
363#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means disabled */
364#endif
365#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
366# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
367#endif
368#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
369#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
370#endif
371
372#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
373#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
374#endif
375#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
376#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
377#endif
378#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
379#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
380#endif
381
382#ifndef GC_MALLOC_LIMIT_MIN
383#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
384#endif
385#ifndef GC_MALLOC_LIMIT_MAX
386#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
387#endif
388#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
389#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
390#endif
391
392#ifndef GC_OLDMALLOC_LIMIT_MIN
393#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
394#endif
395#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
396#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
397#endif
398#ifndef GC_OLDMALLOC_LIMIT_MAX
399#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
400#endif
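/* Each default above can typically be overridden at startup through the
 * correspondingly named environment variable (e.g. RUBY_GC_HEAP_GROWTH_FACTOR
 * or RUBY_GC_MALLOC_LIMIT_MAX), which is parsed into gc_params below. */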
401
402#ifndef GC_CAN_COMPILE_COMPACTION
403#if defined(__wasi__) /* WebAssembly doesn't support signals */
404# define GC_CAN_COMPILE_COMPACTION 0
405#else
406# define GC_CAN_COMPILE_COMPACTION 1
407#endif
408#endif
409
410#ifndef PRINT_MEASURE_LINE
411#define PRINT_MEASURE_LINE 0
412#endif
413#ifndef PRINT_ENTER_EXIT_TICK
414#define PRINT_ENTER_EXIT_TICK 0
415#endif
416#ifndef PRINT_ROOT_TICKS
417#define PRINT_ROOT_TICKS 0
418#endif
419
420#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
421#define TICK_TYPE 1
422
423typedef struct {
424 size_t size_pool_init_slots[SIZE_POOL_COUNT];
425 size_t heap_free_slots;
426 double growth_factor;
427 size_t growth_max_slots;
428
429 double heap_free_slots_min_ratio;
430 double heap_free_slots_goal_ratio;
431 double heap_free_slots_max_ratio;
432 double uncollectible_wb_unprotected_objects_limit_ratio;
433 double oldobject_limit_factor;
434
435 size_t malloc_limit_min;
436 size_t malloc_limit_max;
437 double malloc_limit_growth_factor;
438
439 size_t oldmalloc_limit_min;
440 size_t oldmalloc_limit_max;
441 double oldmalloc_limit_growth_factor;
442
443 VALUE gc_stress;
444} ruby_gc_params_t;
445
446static ruby_gc_params_t gc_params = {
447 { 0 },
448 GC_HEAP_FREE_SLOTS,
449 GC_HEAP_GROWTH_FACTOR,
450 GC_HEAP_GROWTH_MAX_SLOTS,
451
452 GC_HEAP_FREE_SLOTS_MIN_RATIO,
453 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
454 GC_HEAP_FREE_SLOTS_MAX_RATIO,
455 GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
456 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
457
458 GC_MALLOC_LIMIT_MIN,
459 GC_MALLOC_LIMIT_MAX,
460 GC_MALLOC_LIMIT_GROWTH_FACTOR,
461
462 GC_OLDMALLOC_LIMIT_MIN,
463 GC_OLDMALLOC_LIMIT_MAX,
464 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
465
466 FALSE,
467};
468
469/* GC_DEBUG:
470 * enable to embed GC debugging information.
471 */
472#ifndef GC_DEBUG
473#define GC_DEBUG 0
474#endif
475
476/* RGENGC_DEBUG:
477 * 1: basic information
478 * 2: remember set operation
479 * 3: mark
480 * 4:
481 * 5: sweep
482 */
483#ifndef RGENGC_DEBUG
484#ifdef RUBY_DEVEL
485#define RGENGC_DEBUG -1
486#else
487#define RGENGC_DEBUG 0
488#endif
489#endif
490#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
491# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
492#elif defined(HAVE_VA_ARGS_MACRO)
493# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
494#else
495# define RGENGC_DEBUG_ENABLED(level) 0
496#endif
497int ruby_rgengc_debug;
498
499/* RGENGC_CHECK_MODE
500 * 0: disable all assertions
501 * 1: enable assertions (to debug RGenGC)
502 * 2: enable internal consistency check at each GC (for debugging)
503 * 3: enable internal consistency check at each GC steps (for debugging)
504 * 4: enable liveness check
505 * 5: show all references
506 */
507#ifndef RGENGC_CHECK_MODE
508#define RGENGC_CHECK_MODE 0
509#endif
510
511// Note: using RUBY_ASSERT_WHEN() extends a macro in expr (info by nobu).
512#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
513
514/* RGENGC_PROFILE
515 * 0: disable RGenGC profiling
516 * 1: enable profiling for basic information
517 * 2: enable profiling for each type
518 */
519#ifndef RGENGC_PROFILE
520#define RGENGC_PROFILE 0
521#endif
522
523/* RGENGC_ESTIMATE_OLDMALLOC
524 * Enable/disable estimation of the increase in malloc'ed size caused by old objects.
525 * If the estimation exceeds the threshold, a full GC will be invoked.
526 * 0: disable estimation.
527 * 1: enable estimation.
528 */
529#ifndef RGENGC_ESTIMATE_OLDMALLOC
530#define RGENGC_ESTIMATE_OLDMALLOC 1
531#endif
532
533/* RGENGC_FORCE_MAJOR_GC
534 * Force major/full GC if this macro is not 0.
535 */
536#ifndef RGENGC_FORCE_MAJOR_GC
537#define RGENGC_FORCE_MAJOR_GC 0
538#endif
539
540#ifndef GC_PROFILE_MORE_DETAIL
541#define GC_PROFILE_MORE_DETAIL 0
542#endif
543#ifndef GC_PROFILE_DETAIL_MEMORY
544#define GC_PROFILE_DETAIL_MEMORY 0
545#endif
546#ifndef GC_ENABLE_LAZY_SWEEP
547#define GC_ENABLE_LAZY_SWEEP 1
548#endif
549#ifndef CALC_EXACT_MALLOC_SIZE
550#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
551#endif
552#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
553#ifndef MALLOC_ALLOCATED_SIZE
554#define MALLOC_ALLOCATED_SIZE 0
555#endif
556#else
557#define MALLOC_ALLOCATED_SIZE 0
558#endif
559#ifndef MALLOC_ALLOCATED_SIZE_CHECK
560#define MALLOC_ALLOCATED_SIZE_CHECK 0
561#endif
562
563#ifndef GC_DEBUG_STRESS_TO_CLASS
564#define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
565#endif
566
567#ifndef RGENGC_OBJ_INFO
568#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
569#endif
570
571typedef enum {
572 GPR_FLAG_NONE = 0x000,
573 /* major reason */
574 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
575 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
576 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
577 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
578#if RGENGC_ESTIMATE_OLDMALLOC
579 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
580#endif
581 GPR_FLAG_MAJOR_MASK = 0x0ff,
582
583 /* gc reason */
584 GPR_FLAG_NEWOBJ = 0x100,
585 GPR_FLAG_MALLOC = 0x200,
586 GPR_FLAG_METHOD = 0x400,
587 GPR_FLAG_CAPI = 0x800,
588 GPR_FLAG_STRESS = 0x1000,
589
590 /* others */
591 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
592 GPR_FLAG_HAVE_FINALIZE = 0x4000,
593 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
594 GPR_FLAG_FULL_MARK = 0x10000,
595 GPR_FLAG_COMPACT = 0x20000,
596
597 GPR_DEFAULT_REASON =
598 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
599 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
600} gc_profile_record_flag;
601
602typedef struct gc_profile_record {
603 unsigned int flags;
604
605 double gc_time;
606 double gc_invoke_time;
607
608 size_t heap_total_objects;
609 size_t heap_use_size;
610 size_t heap_total_size;
611 size_t moved_objects;
612
613#if GC_PROFILE_MORE_DETAIL
614 double gc_mark_time;
615 double gc_sweep_time;
616
617 size_t heap_use_pages;
618 size_t heap_live_objects;
619 size_t heap_free_objects;
620
621 size_t allocate_increase;
622 size_t allocate_limit;
623
624 double prepare_time;
625 size_t removing_objects;
626 size_t empty_objects;
627#if GC_PROFILE_DETAIL_MEMORY
628 long maxrss;
629 long minflt;
630 long majflt;
631#endif
632#endif
633#if MALLOC_ALLOCATED_SIZE
634 size_t allocated_size;
635#endif
636
637#if RGENGC_PROFILE > 0
638 size_t old_objects;
639 size_t remembered_normal_objects;
640 size_t remembered_shady_objects;
641#endif
642} gc_profile_record;
643
644struct RMoved {
645 VALUE flags;
646 VALUE dummy;
647 VALUE destination;
648 shape_id_t original_shape_id;
649};
650
651#define RMOVED(obj) ((struct RMoved *)(obj))
652
653typedef struct RVALUE {
654 union {
655 struct {
656 VALUE flags; /* always 0 for freed obj */
657 struct RVALUE *next;
658 } free;
659 struct RMoved moved;
660 struct RBasic basic;
661 struct RObject object;
662 struct RClass klass;
663 struct RFloat flonum;
664 struct RString string;
665 struct RArray array;
666 struct RRegexp regexp;
667 struct RHash hash;
668 struct RData data;
669 struct RTypedData typeddata;
670 struct RStruct rstruct;
671 struct RBignum bignum;
672 struct RFile file;
673 struct RMatch match;
674 struct RRational rational;
675 struct RComplex complex;
676 struct RSymbol symbol;
677 union {
678 rb_cref_t cref;
679 struct vm_svar svar;
680 struct vm_throw_data throw_data;
681 struct vm_ifunc ifunc;
682 struct MEMO memo;
683 struct rb_method_entry_struct ment;
684 const rb_iseq_t iseq;
685 rb_env_t env;
686 struct rb_imemo_tmpbuf_struct alloc;
687 rb_ast_t ast;
688 } imemo;
689 struct {
690 struct RBasic basic;
691 VALUE v1;
692 VALUE v2;
693 VALUE v3;
694 } values;
695 } as;
696
697 /* Start of RVALUE_OVERHEAD.
698 * Do not directly read these members from the RVALUE as they're located
699 * at the end of the slot (which may differ in size depending on the size
700 * pool). */
701#if RACTOR_CHECK_MODE
702 uint32_t _ractor_belonging_id;
703#endif
704#if GC_DEBUG
705 const char *file;
706 int line;
707#endif
708} RVALUE;
709
710#if RACTOR_CHECK_MODE
711# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
712#elif GC_DEBUG
713# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
714#else
715# define RVALUE_OVERHEAD 0
716#endif
717
718STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5) + RVALUE_OVERHEAD);
719STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
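/* On a typical 64-bit build (SIZEOF_VALUE == 8) with RVALUE_OVERHEAD == 0 this
 * makes an RVALUE, and therefore BASE_SLOT_SIZE below, 40 bytes; the size pools
 * allocate slots in power-of-two multiples of that size. */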
720
721typedef uintptr_t bits_t;
722enum {
723 BITS_SIZE = sizeof(bits_t),
724 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
725};
726#define popcount_bits rb_popcount_intptr
727
728struct heap_page_header {
729 struct heap_page *page;
730};
731
732struct heap_page_body {
733 struct heap_page_header header;
734 /* char gap[]; */
735 /* RVALUE values[]; */
736};
737
738struct gc_list {
739 VALUE *varptr;
740 struct gc_list *next;
741};
742
743#define STACK_CHUNK_SIZE 500
744
745typedef struct stack_chunk {
746 VALUE data[STACK_CHUNK_SIZE];
747 struct stack_chunk *next;
748} stack_chunk_t;
749
750typedef struct mark_stack {
751 stack_chunk_t *chunk;
752 stack_chunk_t *cache;
753 int index;
754 int limit;
755 size_t cache_size;
756 size_t unused_cache_size;
757} mark_stack_t;
758
759#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
760#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
761
762typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
763
764typedef struct rb_heap_struct {
765 struct heap_page *free_pages;
766 struct ccan_list_head pages;
767 struct heap_page *sweeping_page; /* iterator for .pages */
768 struct heap_page *compact_cursor;
769 uintptr_t compact_cursor_index;
770 struct heap_page *pooled_pages;
771 size_t total_pages; /* total page count in a heap */
772 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
773} rb_heap_t;
774
775typedef struct rb_size_pool_struct {
776 short slot_size;
777
778 size_t allocatable_pages;
779
780 /* Basic statistics */
781 size_t total_allocated_pages;
782 size_t total_freed_pages;
783 size_t force_major_gc_count;
784 size_t force_incremental_marking_finish_count;
785 size_t total_allocated_objects;
786 size_t total_freed_objects;
787
788 /* Sweeping statistics */
789 size_t freed_slots;
790 size_t empty_slots;
791
792 rb_heap_t eden_heap;
793 rb_heap_t tomb_heap;
794} rb_size_pool_t;
795
796enum gc_mode {
797 gc_mode_none,
798 gc_mode_marking,
799 gc_mode_sweeping,
800 gc_mode_compacting,
801};
802
803typedef struct rb_objspace {
804 struct {
805 size_t limit;
806 size_t increase;
807#if MALLOC_ALLOCATED_SIZE
808 size_t allocated_size;
809 size_t allocations;
810#endif
811
812 } malloc_params;
813
814 struct {
815 unsigned int mode : 2;
816 unsigned int immediate_sweep : 1;
817 unsigned int dont_gc : 1;
818 unsigned int dont_incremental : 1;
819 unsigned int during_gc : 1;
820 unsigned int during_compacting : 1;
821 unsigned int during_reference_updating : 1;
822 unsigned int gc_stressful: 1;
823 unsigned int has_newobj_hook: 1;
824 unsigned int during_minor_gc : 1;
825 unsigned int during_incremental_marking : 1;
826 unsigned int measure_gc : 1;
827 } flags;
828
829 rb_event_flag_t hook_events;
830 VALUE next_object_id;
831
832 rb_size_pool_t size_pools[SIZE_POOL_COUNT];
833
834 struct {
835 rb_atomic_t finalizing;
836 } atomic_flags;
837
838    mark_stack_t mark_stack;
839 size_t marked_slots;
840
841 struct {
842 struct heap_page **sorted;
843 size_t allocated_pages;
844 size_t allocatable_pages;
845 size_t sorted_length;
846 uintptr_t range[2];
847 size_t freeable_pages;
848
849 /* final */
850 size_t final_slots;
851 VALUE deferred_final;
852 } heap_pages;
853
854 st_table *finalizer_table;
855
856 struct {
857 int run;
858 unsigned int latest_gc_info;
859 gc_profile_record *records;
860 gc_profile_record *current_record;
861 size_t next_index;
862 size_t size;
863
864#if GC_PROFILE_MORE_DETAIL
865 double prepare_time;
866#endif
867 double invoke_time;
868
869 size_t minor_gc_count;
870 size_t major_gc_count;
871 size_t compact_count;
872 size_t read_barrier_faults;
873#if RGENGC_PROFILE > 0
874 size_t total_generated_normal_object_count;
875 size_t total_generated_shady_object_count;
876 size_t total_shade_operation_count;
877 size_t total_promoted_count;
878 size_t total_remembered_normal_object_count;
879 size_t total_remembered_shady_object_count;
880
881#if RGENGC_PROFILE >= 2
882 size_t generated_normal_object_count_types[RUBY_T_MASK];
883 size_t generated_shady_object_count_types[RUBY_T_MASK];
884 size_t shade_operation_count_types[RUBY_T_MASK];
885 size_t promoted_types[RUBY_T_MASK];
886 size_t remembered_normal_object_count_types[RUBY_T_MASK];
887 size_t remembered_shady_object_count_types[RUBY_T_MASK];
888#endif
889#endif /* RGENGC_PROFILE */
890
891 /* temporary profiling space */
892 double gc_sweep_start_time;
893 size_t total_allocated_objects_at_gc_start;
894 size_t heap_used_at_gc_start;
895
896 /* basic statistics */
897 size_t count;
898 uint64_t marking_time_ns;
899 struct timespec marking_start_time;
900 uint64_t sweeping_time_ns;
901 struct timespec sweeping_start_time;
902
903 /* Weak references */
904 size_t weak_references_count;
905 size_t retained_weak_references_count;
906 } profile;
907 struct gc_list *global_list;
908
909 VALUE gc_stress_mode;
910
911 struct {
912 VALUE parent_object;
913 int need_major_gc;
914 size_t last_major_gc;
915 size_t uncollectible_wb_unprotected_objects;
916 size_t uncollectible_wb_unprotected_objects_limit;
917 size_t old_objects;
918 size_t old_objects_limit;
919
920#if RGENGC_ESTIMATE_OLDMALLOC
921 size_t oldmalloc_increase;
922 size_t oldmalloc_increase_limit;
923#endif
924
925#if RGENGC_CHECK_MODE >= 2
926 struct st_table *allrefs_table;
927 size_t error_count;
928#endif
929 } rgengc;
930
931 struct {
932 size_t considered_count_table[T_MASK];
933 size_t moved_count_table[T_MASK];
934 size_t moved_up_count_table[T_MASK];
935 size_t moved_down_count_table[T_MASK];
936 size_t total_moved;
937
938 /* This function will be used, if set, to sort the heap prior to compaction */
939 gc_compact_compare_func compare_func;
940 } rcompactor;
941
942 struct {
943 size_t pooled_slots;
944 size_t step_slots;
945 } rincgc;
946
947 st_table *id_to_obj_tbl;
948 st_table *obj_to_id_tbl;
949
950#if GC_DEBUG_STRESS_TO_CLASS
951 VALUE stress_to_class;
952#endif
953
954 rb_darray(VALUE *) weak_references;
955 rb_postponed_job_handle_t finalize_deferred_pjob;
956} rb_objspace_t;
957
958
959#ifndef HEAP_PAGE_ALIGN_LOG
960/* default tiny heap size: 64KiB */
961#define HEAP_PAGE_ALIGN_LOG 16
962#endif
963
964#define BASE_SLOT_SIZE sizeof(RVALUE)
965
966#define CEILDIV(i, mod) roomof(i, mod)
967enum {
968 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
969 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
970 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
971 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
972 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
973 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
974};
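/* With the defaults above on a 64-bit build (64 KiB pages, 40-byte base slots)
 * this works out to 1638 base-sized slots per page and a 26-word (208-byte)
 * bitmap per page. */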
975#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
976#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
977
978#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
979# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
980#endif
981
982#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
983/* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
984 * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
985
986#ifndef HAVE_MMAP
987/* We can't use mmap of course, if it is not available. */
988static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
989
990#elif defined(__wasm__)
991/* wasmtime does not have proper support for mmap.
992 * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
993 */
994static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
995
996#elif HAVE_CONST_PAGE_SIZE
997/* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
998static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
999
1000#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
1001/* If we can use the maximum page size. */
1002static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
1003
1004#elif defined(PAGE_SIZE)
1005/* If the PAGE_SIZE macro can be used dynamically. */
1006# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
1007
1008#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
1009/* If we can use sysconf to determine the page size. */
1010# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
1011
1012#else
1013/* Otherwise we can't determine the system page size, so don't use mmap. */
1014static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
1015#endif
1016
1017#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
1018/* We can determine the system page size at runtime. */
1019# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
1020
1021static bool heap_page_alloc_use_mmap;
1022#endif
1023
1024#define RVALUE_AGE_BIT_COUNT 2
1025#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
1026
1027struct heap_page {
1028 short slot_size;
1029 short total_slots;
1030 short free_slots;
1031 short final_slots;
1032 short pinned_slots;
1033 struct {
1034 unsigned int before_sweep : 1;
1035 unsigned int has_remembered_objects : 1;
1036 unsigned int has_uncollectible_wb_unprotected_objects : 1;
1037 unsigned int in_tomb : 1;
1038 } flags;
1039
1040 rb_size_pool_t *size_pool;
1041
1042 struct heap_page *free_next;
1043 uintptr_t start;
1044 RVALUE *freelist;
1045 struct ccan_list_node page_node;
1046
1047 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
1048 /* the following three bitmaps are cleared at the beginning of full GC */
1049 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
1050 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
1051 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
1052
1053 bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
1054
1055 /* If set, the object is not movable */
1056 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
1057 bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
1058};
1059
1060/*
1061 * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
1062 */
1063static void
1064asan_lock_freelist(struct heap_page *page)
1065{
1066 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1067}
1068
1069/*
1070 * When asan is enabled, this will enable the ability to write to the freelist
1071 */
1072static void
1073asan_unlock_freelist(struct heap_page *page)
1074{
1075 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1076}
1077
1078#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
1079#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
1080#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
1081
1082#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
1083#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
1084#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
1085#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
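/* For example, on a 64-bit build an object sitting 200 bytes (0xc8) past the
 * start of its 64 KiB-aligned page has NUM_IN_PAGE == 200 / 40 == 5, so it is
 * tracked by bit 5 of word 0 in each of the per-page bitmaps. */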
1086
1087/* Bitmap Operations */
1088#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1089#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1090#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
1091
1092/* getting bitmap */
1093#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1094#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1095#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1096#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1097#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
1098
1099#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1100
1101#define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
1102#define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
1103
1104#define RVALUE_OLD_AGE 3
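/* Object ages are 2-bit fields packed into the page's age_bits words, so on a
 * 64-bit build each bits_t word holds the ages of 32 consecutive slots; once an
 * object's age reaches RVALUE_OLD_AGE (3) it is promoted to the old generation. */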
1105
1106static int
1107RVALUE_AGE_GET(VALUE obj)
1108{
1109 bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
1110 return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
1111}
1112
1113static void
1114RVALUE_AGE_SET(VALUE obj, int age)
1115{
1116 RUBY_ASSERT(age <= RVALUE_OLD_AGE);
1117 bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
1118 // clear the bits
1119 age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
1120 // shift the correct value in
1121 age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
1122 if (age == RVALUE_OLD_AGE) {
1123        RB_FL_SET_RAW(obj, RUBY_FL_PROMOTED);
1124    }
1125 else {
1126        RB_FL_UNSET_RAW(obj, RUBY_FL_PROMOTED);
1127    }
1128}
1129
1130/* Aliases */
1131#define rb_objspace (*rb_objspace_of(GET_VM()))
1132#define rb_objspace_of(vm) ((vm)->objspace)
1133#define unless_objspace(objspace) \
1134 rb_objspace_t *objspace; \
1135 rb_vm_t *unless_objspace_vm = GET_VM(); \
1136 if (unless_objspace_vm) objspace = unless_objspace_vm->objspace; \
1137 else /* return; or objspace will be warned uninitialized */
1138
1139#define ruby_initial_gc_stress gc_params.gc_stress
1140
1141VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1142
1143#define malloc_limit objspace->malloc_params.limit
1144#define malloc_increase objspace->malloc_params.increase
1145#define malloc_allocated_size objspace->malloc_params.allocated_size
1146#define heap_pages_sorted objspace->heap_pages.sorted
1147#define heap_allocated_pages objspace->heap_pages.allocated_pages
1148#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1149#define heap_pages_lomem objspace->heap_pages.range[0]
1150#define heap_pages_himem objspace->heap_pages.range[1]
1151#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1152#define heap_pages_final_slots objspace->heap_pages.final_slots
1153#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1154#define size_pools objspace->size_pools
1155#define during_gc objspace->flags.during_gc
1156#define finalizing objspace->atomic_flags.finalizing
1157#define finalizer_table objspace->finalizer_table
1158#define global_list objspace->global_list
1159#define ruby_gc_stressful objspace->flags.gc_stressful
1160#define ruby_gc_stress_mode objspace->gc_stress_mode
1161#if GC_DEBUG_STRESS_TO_CLASS
1162#define stress_to_class objspace->stress_to_class
1163#define set_stress_to_class(c) (stress_to_class = (c))
1164#else
1165#define stress_to_class (objspace, 0)
1166#define set_stress_to_class(c) (objspace, (c))
1167#endif
1168
1169#if 0
1170#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
1171#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
1172#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
1173#define dont_gc_val() (objspace->flags.dont_gc)
1174#else
1175#define dont_gc_on() (objspace->flags.dont_gc = 1)
1176#define dont_gc_off() (objspace->flags.dont_gc = 0)
1177#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1178#define dont_gc_val() (objspace->flags.dont_gc)
1179#endif
1180
1181static inline enum gc_mode
1182gc_mode_verify(enum gc_mode mode)
1183{
1184#if RGENGC_CHECK_MODE > 0
1185 switch (mode) {
1186 case gc_mode_none:
1187 case gc_mode_marking:
1188 case gc_mode_sweeping:
1189 case gc_mode_compacting:
1190 break;
1191 default:
1192 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
1193 }
1194#endif
1195 return mode;
1196}
1197
1198static inline bool
1199has_sweeping_pages(rb_objspace_t *objspace)
1200{
1201 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1202 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1203 return TRUE;
1204 }
1205 }
1206 return FALSE;
1207}
1208
1209static inline size_t
1210heap_eden_total_pages(rb_objspace_t *objspace)
1211{
1212 size_t count = 0;
1213 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1214 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1215 }
1216 return count;
1217}
1218
1219static inline size_t
1220heap_eden_total_slots(rb_objspace_t *objspace)
1221{
1222 size_t count = 0;
1223 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1224 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1225 }
1226 return count;
1227}
1228
1229static inline size_t
1230heap_tomb_total_pages(rb_objspace_t *objspace)
1231{
1232 size_t count = 0;
1233 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1234 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1235 }
1236 return count;
1237}
1238
1239static inline size_t
1240heap_allocatable_pages(rb_objspace_t *objspace)
1241{
1242 size_t count = 0;
1243 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1244 count += size_pools[i].allocatable_pages;
1245 }
1246 return count;
1247}
1248
1249static inline size_t
1250heap_allocatable_slots(rb_objspace_t *objspace)
1251{
1252 size_t count = 0;
1253 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1254 rb_size_pool_t *size_pool = &size_pools[i];
1255 int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
1256 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1257 }
1258 return count;
1259}
1260
1261static inline size_t
1262total_allocated_pages(rb_objspace_t *objspace)
1263{
1264 size_t count = 0;
1265 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1266 rb_size_pool_t *size_pool = &size_pools[i];
1267 count += size_pool->total_allocated_pages;
1268 }
1269 return count;
1270}
1271
1272static inline size_t
1273total_freed_pages(rb_objspace_t *objspace)
1274{
1275 size_t count = 0;
1276 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1277 rb_size_pool_t *size_pool = &size_pools[i];
1278 count += size_pool->total_freed_pages;
1279 }
1280 return count;
1281}
1282
1283static inline size_t
1284total_allocated_objects(rb_objspace_t *objspace)
1285{
1286 size_t count = 0;
1287 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1288 rb_size_pool_t *size_pool = &size_pools[i];
1289 count += size_pool->total_allocated_objects;
1290 }
1291 return count;
1292}
1293
1294static inline size_t
1295total_freed_objects(rb_objspace_t *objspace)
1296{
1297 size_t count = 0;
1298 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1299 rb_size_pool_t *size_pool = &size_pools[i];
1300 count += size_pool->total_freed_objects;
1301 }
1302 return count;
1303}
1304
1305#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1306#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
1307
1308#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1309#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1310#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1311#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1312#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1313#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1314#define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
1315#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1316
1317#if SIZEOF_LONG == SIZEOF_VOIDP
1318# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1319#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1320# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1321 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1322#else
1323# error not supported
1324#endif
1325
1326#define RANY(o) ((RVALUE*)(o))
1327
1328struct RZombie {
1329 struct RBasic basic;
1330 VALUE next;
1331 void (*dfree)(void *);
1332 void *data;
1333};
1334
1335#define RZOMBIE(o) ((struct RZombie *)(o))
1336
1337#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1338
1339#if RUBY_MARK_FREE_DEBUG
1340int ruby_gc_debug_indent = 0;
1341#endif
1343int ruby_disable_gc = 0;
1344int ruby_enable_autocompact = 0;
1345#if RGENGC_CHECK_MODE
1346gc_compact_compare_func ruby_autocompact_compare_func;
1347#endif
1348
1349void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
1350void rb_iseq_free(const rb_iseq_t *iseq);
1351size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1352void rb_vm_update_references(void *ptr);
1353
1354void rb_gcdebug_print_obj_condition(VALUE obj);
1355
1356NORETURN(static void *gc_vraise(void *ptr));
1357NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1358NORETURN(static void negative_size_allocation_error(const char *));
1359
1360static void init_mark_stack(mark_stack_t *stack);
1361static int garbage_collect(rb_objspace_t *, unsigned int reason);
1362
1363static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1364static void gc_rest(rb_objspace_t *objspace);
1365
1366enum gc_enter_event {
1367 gc_enter_event_start,
1368 gc_enter_event_continue,
1369 gc_enter_event_rest,
1370 gc_enter_event_finalizer,
1371 gc_enter_event_rb_memerror,
1372};
1373
1374static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1375static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1376static void gc_marking_enter(rb_objspace_t *objspace);
1377static void gc_marking_exit(rb_objspace_t *objspace);
1378static void gc_sweeping_enter(rb_objspace_t *objspace);
1379static void gc_sweeping_exit(rb_objspace_t *objspace);
1380static bool gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1381
1382static void gc_sweep(rb_objspace_t *objspace);
1383static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool);
1384static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1385
1386static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1387static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1388static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1389NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1390
1391static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1392NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1393
1394static size_t obj_memsize_of(VALUE obj, int use_all_types);
1395static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1396
1397static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1398static VALUE gc_disable_no_rest(rb_objspace_t *);
1399
1400static double getrusage_time(void);
1401static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1402static inline void gc_prof_timer_start(rb_objspace_t *);
1403static inline void gc_prof_timer_stop(rb_objspace_t *);
1404static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1405static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1406static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1407static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1408static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1409static inline void gc_prof_set_heap_info(rb_objspace_t *);
1410
1411#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1412 if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
1413 *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
1414 } \
1415} while (0)
1416
1417#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
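/* During compaction, a moved object leaves behind a T_MOVED forwarding cell whose
 * destination field records the new address; these macros rewrite one reference
 * in place, e.g. UPDATE_IF_MOVED(objspace, some->field) repoints some->field when
 * its referent has been moved. */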
1418
1419#define gc_prof_record(objspace) (objspace)->profile.current_record
1420#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1421
1422#ifdef HAVE_VA_ARGS_MACRO
1423# define gc_report(level, objspace, ...) \
1424 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1425#else
1426# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1427#endif
1428PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1429static const char *obj_info(VALUE obj);
1430static const char *obj_type_name(VALUE obj);
1431
1432static void gc_finalize_deferred(void *dmy);
1433
1434/*
1435 * 1 - TSC (H/W Time Stamp Counter)
1436 * 2 - getrusage
1437 */
1438#ifndef TICK_TYPE
1439#define TICK_TYPE 1
1440#endif
1441
1442#if USE_TICK_T
1443
1444#if TICK_TYPE == 1
1445/* the following code is only for internal tuning. */
1446
1447/* Source code to use RDTSC is quoted and modified from
1448 * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1449 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1450 */
1451
1452#if defined(__GNUC__) && defined(__i386__)
1453typedef unsigned long long tick_t;
1454#define PRItick "llu"
1455static inline tick_t
1456tick(void)
1457{
1458 unsigned long long int x;
1459 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1460 return x;
1461}
1462
1463#elif defined(__GNUC__) && defined(__x86_64__)
1464typedef unsigned long long tick_t;
1465#define PRItick "llu"
1466
1467static __inline__ tick_t
1468tick(void)
1469{
1470 unsigned long hi, lo;
1471 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1472 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1473}
1474
1475#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1476typedef unsigned long long tick_t;
1477#define PRItick "llu"
1478
1479static __inline__ tick_t
1480tick(void)
1481{
1482 unsigned long long val = __builtin_ppc_get_timebase();
1483 return val;
1484}
1485
1486/* Implementation for macOS PPC by @nobu
1487 * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1488 */
1489#elif defined(__POWERPC__) && defined(__APPLE__)
1490typedef unsigned long long tick_t;
1491#define PRItick "llu"
1492
1493static __inline__ tick_t
1494tick(void)
1495{
1496 unsigned long int upper, lower, tmp;
1497 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1498 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1499 do {
1500 mftbu(upper);
1501 mftb(lower);
1502 mftbu(tmp);
1503 } while (tmp != upper);
1504 return ((tick_t)upper << 32) | lower;
1505}
1506
1507#elif defined(__aarch64__) && defined(__GNUC__)
1508typedef unsigned long tick_t;
1509#define PRItick "lu"
1510
1511static __inline__ tick_t
1512tick(void)
1513{
1514 unsigned long val;
1515 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1516 return val;
1517}
1518
1519
1520#elif defined(_WIN32) && defined(_MSC_VER)
1521#include <intrin.h>
1522typedef unsigned __int64 tick_t;
1523#define PRItick "llu"
1524
1525static inline tick_t
1526tick(void)
1527{
1528 return __rdtsc();
1529}
1530
1531#else /* use clock */
1532typedef clock_t tick_t;
1533#define PRItick "llu"
1534
1535static inline tick_t
1536tick(void)
1537{
1538 return clock();
1539}
1540#endif /* TSC */
1541
1542#elif TICK_TYPE == 2
1543typedef double tick_t;
1544#define PRItick "4.9f"
1545
1546static inline tick_t
1547tick(void)
1548{
1549 return getrusage_time();
1550}
1551#else /* TICK_TYPE */
1552#error "choose tick type"
1553#endif /* TICK_TYPE */
1554
1555#define MEASURE_LINE(expr) do { \
1556 volatile tick_t start_time = tick(); \
1557 volatile tick_t end_time; \
1558 expr; \
1559 end_time = tick(); \
1560 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1561} while (0)
1562
1563#else /* USE_TICK_T */
1564#define MEASURE_LINE(expr) expr
1565#endif /* USE_TICK_T */
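/* MEASURE_LINE(expr) wraps a single statement: with USE_TICK_T enabled it prints
 * the elapsed tick count for that statement to stderr (for example
 * MEASURE_LINE(gc_sweep(objspace))); otherwise it expands to the bare expression
 * and adds no overhead. */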
1566
1567static inline void *
1568asan_unpoison_object_temporary(VALUE obj)
1569{
1570 void *ptr = asan_poisoned_object_p(obj);
1571 asan_unpoison_object(obj, false);
1572 return ptr;
1573}
1574
1575static inline void *
1576asan_poison_object_restore(VALUE obj, void *ptr)
1577{
1578 if (ptr) {
1579 asan_poison_object(obj);
1580 }
1581 return NULL;
1582}
1583
1584#define asan_unpoisoning_object(obj) \
1585 for (void *poisoned = asan_unpoison_object_temporary(obj), \
1586 *unpoisoning = &poisoned; /* flag to loop just once */ \
1587 unpoisoning; \
1588 unpoisoning = asan_poison_object_restore(obj, poisoned))
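/* The for-header above is a run-exactly-once trick: the object is unpoisoned on
 * entry and, if it was poisoned before, re-poisoned when the block exits, so a
 * caller can write
 *     asan_unpoisoning_object(obj) { ... inspect obj ... }
 * without restoring the ASAN state by hand. */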
1589
1590#define FL_CHECK2(name, x, pred) \
1591 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1592 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1593#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1594#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1595#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1596
1597#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1598#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1599#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1600
1601#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1602#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1603#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1604
1605#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1606#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1607#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1608
1609static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1610static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1611static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1612
1613static int
1614check_rvalue_consistency_force(const VALUE obj, int terminate)
1615{
1616 int err = 0;
1617 rb_objspace_t *objspace = &rb_objspace;
1618
1619 RB_VM_LOCK_ENTER_NO_BARRIER();
1620 {
1621 if (SPECIAL_CONST_P(obj)) {
1622 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1623 err++;
1624 }
1625 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1626 /* check if it is in tomb_pages */
1627 struct heap_page *page = NULL;
1628 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1629 rb_size_pool_t *size_pool = &size_pools[i];
1630 ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1631 if (page->start <= (uintptr_t)obj &&
1632 (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
1633 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1634 (void *)obj, (void *)page);
1635 err++;
1636 goto skip;
1637 }
1638 }
1639 }
1640 bp();
1641 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1642 err++;
1643 skip:
1644 ;
1645 }
1646 else {
1647 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1648 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1649 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1650 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
1651 const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1652 const int age = RVALUE_AGE_GET((VALUE)obj);
1653
1654 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1655 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1656 err++;
1657 }
1658 if (BUILTIN_TYPE(obj) == T_NONE) {
1659 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1660 err++;
1661 }
1662 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1663 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1664 err++;
1665 }
1666
1667 obj_memsize_of((VALUE)obj, FALSE);
1668
1669 /* check generation
1670 *
1671 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1672 */
1673 if (age > 0 && wb_unprotected_bit) {
1674 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1675 err++;
1676 }
1677
1678 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1679 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1680 err++;
1681 }
1682
1683 if (!is_full_marking(objspace)) {
1684 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1685 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1686 obj_info(obj), age);
1687 err++;
1688 }
1689 if (remembered_bit && age != RVALUE_OLD_AGE) {
1690 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1691 obj_info(obj), age);
1692 err++;
1693 }
1694 }
1695
1696 /*
1697 * check coloring
1698 *
1699 * marking:false marking:true
1700 * marked:false white *invalid*
1701 * marked:true black grey
1702 */
1703 if (is_incremental_marking(objspace) && marking_bit) {
1704 if (!is_marking(objspace) && !mark_bit) {
1705 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1706 err++;
1707 }
1708 }
1709 }
1710 }
1711 RB_VM_LOCK_LEAVE_NO_BARRIER();
1712
1713 if (err > 0 && terminate) {
1714        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1715 }
1716 return err;
1717}
1718
1719#if RGENGC_CHECK_MODE == 0
1720static inline VALUE
1721check_rvalue_consistency(const VALUE obj)
1722{
1723 return obj;
1724}
1725#else
1726static VALUE
1727check_rvalue_consistency(const VALUE obj)
1728{
1729 check_rvalue_consistency_force(obj, TRUE);
1730 return obj;
1731}
1732#endif
1733
1734static inline int
1735gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1736{
1737 if (RB_SPECIAL_CONST_P(obj)) {
1738 return FALSE;
1739 }
1740 else {
1741 void *poisoned = asan_unpoison_object_temporary(obj);
1742
1743 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1744 /* Re-poison slot if it's not the one we want */
1745 if (poisoned) {
1746 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1747 asan_poison_object(obj);
1748 }
1749 return ret;
1750 }
1751}
1752
1753static inline int
1754RVALUE_MARKED(VALUE obj)
1755{
1756 check_rvalue_consistency(obj);
1757 return RVALUE_MARK_BITMAP(obj) != 0;
1758}
1759
1760static inline int
1761RVALUE_PINNED(VALUE obj)
1762{
1763 check_rvalue_consistency(obj);
1764 return RVALUE_PIN_BITMAP(obj) != 0;
1765}
1766
1767static inline int
1768RVALUE_WB_UNPROTECTED(VALUE obj)
1769{
1770 check_rvalue_consistency(obj);
1771 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1772}
1773
1774static inline int
1775RVALUE_MARKING(VALUE obj)
1776{
1777 check_rvalue_consistency(obj);
1778 return RVALUE_MARKING_BITMAP(obj) != 0;
1779}
1780
1781static inline int
1782RVALUE_REMEMBERED(VALUE obj)
1783{
1784 check_rvalue_consistency(obj);
1785 return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1786}
1787
1788static inline int
1789RVALUE_UNCOLLECTIBLE(VALUE obj)
1790{
1791 check_rvalue_consistency(obj);
1792 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1793}
1794
1795static inline int
1796RVALUE_OLD_P(VALUE obj)
1797{
1798 GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
1799 check_rvalue_consistency(obj);
1800 // Because this will only ever be called on GC controlled objects,
1801 // we can use the faster _RAW function here
1802 return RB_OBJ_PROMOTED_RAW(obj);
1803}
1804
1805static inline void
1806RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1807{
1808 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1809 objspace->rgengc.old_objects++;
1810
1811#if RGENGC_PROFILE >= 2
1812 objspace->profile.total_promoted_count++;
1813 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1814#endif
1815}
1816
1817static inline void
1818RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1819{
1820 RB_DEBUG_COUNTER_INC(obj_promote);
1821 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1822}
1823
1824/* set age to age+1 */
1825static inline void
1826RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1827{
1828 int age = RVALUE_AGE_GET((VALUE)obj);
1829
1830 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1831 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1832 }
1833
1834 age++;
1835 RVALUE_AGE_SET(obj, age);
1836
1837 if (age == RVALUE_OLD_AGE) {
1838 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1839 }
1840
1841 check_rvalue_consistency(obj);
1842}
1843
1844static inline void
1845RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1846{
1847 check_rvalue_consistency(obj);
1848 GC_ASSERT(!RVALUE_OLD_P(obj));
1849 RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
1850 check_rvalue_consistency(obj);
1851}
1852
1853static inline void
1854RVALUE_AGE_RESET(VALUE obj)
1855{
1856 RVALUE_AGE_SET(obj, 0);
1857}
1858
1859static inline void
1860RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1861{
1862 check_rvalue_consistency(obj);
1863 GC_ASSERT(RVALUE_OLD_P(obj));
1864
1865 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1866 CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
1867 }
1868
1869 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1870 RVALUE_AGE_RESET(obj);
1871
1872 if (RVALUE_MARKED(obj)) {
1873 objspace->rgengc.old_objects--;
1874 }
1875
1876 check_rvalue_consistency(obj);
1877}
1878
1879static inline int
1880RVALUE_BLACK_P(VALUE obj)
1881{
1882 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1883}
1884
1885#if 0
1886static inline int
1887RVALUE_GREY_P(VALUE obj)
1888{
1889 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1890}
1891#endif
1892
1893static inline int
1894RVALUE_WHITE_P(VALUE obj)
1895{
1896 return RVALUE_MARKED(obj) == FALSE;
1897}
1898
1899/*
1900 --------------------------- ObjectSpace -----------------------------
1901*/
1902
1903static inline void *
1904calloc1(size_t n)
1905{
1906 return calloc(1, n);
1907}
1908
1909rb_objspace_t *
1910rb_objspace_alloc(void)
1911{
1912 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1913 objspace->flags.measure_gc = 1;
1914 malloc_limit = gc_params.malloc_limit_min;
1915 objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
1916 if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
1917 rb_bug("Could not preregister postponed job for GC");
1918 }
1919
1920 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1921 rb_size_pool_t *size_pool = &size_pools[i];
1922
1923 size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
1924
1925 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1926 ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1927 }
1928
1929 rb_darray_make_without_gc(&objspace->weak_references, 0);
1930
1931 dont_gc_on();
1932
1933 return objspace;
1934}
1935
1936static void free_stack_chunks(mark_stack_t *);
1937static void mark_stack_free_cache(mark_stack_t *);
1938static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1939
1940void
1941rb_objspace_free(rb_objspace_t *objspace)
1942{
1943 if (is_lazy_sweeping(objspace))
1944 rb_bug("lazy sweeping underway when freeing object space");
1945
1946 free(objspace->profile.records);
1947 objspace->profile.records = NULL;
1948
1949 if (global_list) {
1950 struct gc_list *list, *next;
1951 for (list = global_list; list; list = next) {
1952 next = list->next;
1953 xfree(list);
1954 }
1955 }
1956 if (heap_pages_sorted) {
1957 size_t i;
1958 size_t total_heap_pages = heap_allocated_pages;
1959 for (i = 0; i < total_heap_pages; ++i) {
1960 heap_page_free(objspace, heap_pages_sorted[i]);
1961 }
1962 free(heap_pages_sorted);
1963 heap_allocated_pages = 0;
1964 heap_pages_sorted_length = 0;
1965 heap_pages_lomem = 0;
1966 heap_pages_himem = 0;
1967
1968 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1969 rb_size_pool_t *size_pool = &size_pools[i];
1970 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1971 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1972 }
1973 }
1974 st_free_table(objspace->id_to_obj_tbl);
1975 st_free_table(objspace->obj_to_id_tbl);
1976
1977 free_stack_chunks(&objspace->mark_stack);
1978 mark_stack_free_cache(&objspace->mark_stack);
1979
1980 rb_darray_free_without_gc(objspace->weak_references);
1981
1982 free(objspace);
1983}
1984
1985static void
1986heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1987{
1988 struct heap_page **sorted;
1989 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1990
1991 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1992 next_length, size);
1993
1994 if (heap_pages_sorted_length > 0) {
1995 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1996 if (sorted) heap_pages_sorted = sorted;
1997 }
1998 else {
1999 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
2000 }
2001
2002 if (sorted == 0) {
2003 rb_memerror();
2004 }
2005
2006 heap_pages_sorted_length = next_length;
2007}
2008
2009static void
2010heap_pages_expand_sorted(rb_objspace_t *objspace)
2011{
2012 /* Usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
2013 * because heap_allocatable_pages already includes heap_tomb->total_pages (tomb pages are recycled).
2014 * However, if some pages have no empty slots, new pages may need to be created, so the
2015 * additional allocatable page count (heap_tomb->total_pages) is added here as well.
2016 */
2017 size_t next_length = heap_allocatable_pages(objspace);
2018 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
2019 rb_size_pool_t *size_pool = &size_pools[i];
2020 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
2021 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
2022 }
2023
2024 if (next_length > heap_pages_sorted_length) {
2025 heap_pages_expand_sorted_to(objspace, next_length);
2026 }
2027
2028 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2029 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2030}
2031
2032static void
2033size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
2034{
2035 size_pool->allocatable_pages = s;
2036 heap_pages_expand_sorted(objspace);
2037}
2038
2039static inline void
2040heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
2041{
2042 ASSERT_vm_locking();
2043
2044 RVALUE *p = (RVALUE *)obj;
2045
2046 asan_unpoison_object(obj, false);
2047
2048 asan_unlock_freelist(page);
2049
2050 p->as.free.flags = 0;
2051 p->as.free.next = page->freelist;
2052 page->freelist = p;
2053 asan_lock_freelist(page);
2054
2055 RVALUE_AGE_RESET(obj);
2056
2057 if (RGENGC_CHECK_MODE &&
2058 /* obj should belong to page */
2059 !(page->start <= (uintptr_t)obj &&
2060 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
2061 obj % BASE_SLOT_SIZE == 0)) {
2062 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
2063 }
2064
2065 asan_poison_object(obj);
2066 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
2067}
2068
2069static inline void
2070heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
2071{
2072 asan_unlock_freelist(page);
2073 GC_ASSERT(page->free_slots != 0);
2074 GC_ASSERT(page->freelist != NULL);
2075
2076 page->free_next = heap->free_pages;
2077 heap->free_pages = page;
2078
2079 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
2080
2081 asan_lock_freelist(page);
2082}
2083
2084static inline void
2085heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2086{
2087 asan_unlock_freelist(page);
2088 GC_ASSERT(page->free_slots != 0);
2089 GC_ASSERT(page->freelist != NULL);
2090
2091 page->free_next = heap->pooled_pages;
2092 heap->pooled_pages = page;
2093 objspace->rincgc.pooled_slots += page->free_slots;
2094
2095 asan_lock_freelist(page);
2096}
2097
2098static void
2099heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2100{
2101 ccan_list_del(&page->page_node);
2102 heap->total_pages--;
2103 heap->total_slots -= page->total_slots;
2104}
2105
2106static void rb_aligned_free(void *ptr, size_t size);
2107
2108static void
2109heap_page_body_free(struct heap_page_body *page_body)
2110{
2111 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2112
2113 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2114#ifdef HAVE_MMAP
2115 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
2116 if (munmap(page_body, HEAP_PAGE_SIZE)) {
2117 rb_bug("heap_page_body_free: munmap failed");
2118 }
2119#endif
2120 }
2121 else {
2122 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
2123 }
2124}
2125
2126static void
2127heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
2128{
2129 heap_allocated_pages--;
2130 page->size_pool->total_freed_pages++;
2131 heap_page_body_free(GET_PAGE_BODY(page->start));
2132 free(page);
2133}
2134
2135static void
2136heap_pages_free_unused_pages(rb_objspace_t *objspace)
2137{
2138 size_t i, j;
2139
2140 bool has_pages_in_tomb_heap = FALSE;
2141 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2142 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2143 has_pages_in_tomb_heap = TRUE;
2144 break;
2145 }
2146 }
2147
2148 if (has_pages_in_tomb_heap) {
2149 for (i = j = 0; j < heap_allocated_pages; i++) {
2150 struct heap_page *page = heap_pages_sorted[i];
2151
2152 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2153 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2154 heap_page_free(objspace, page);
2155 }
2156 else {
2157 if (i != j) {
2158 heap_pages_sorted[j] = page;
2159 }
2160 j++;
2161 }
2162 }
2163
2164 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2165 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2166 GC_ASSERT(himem <= heap_pages_himem);
2167 heap_pages_himem = himem;
2168
2169 struct heap_page *lopage = heap_pages_sorted[0];
2170 uintptr_t lomem = (uintptr_t)lopage->start;
2171 GC_ASSERT(lomem >= heap_pages_lomem);
2172 heap_pages_lomem = lomem;
2173
2174 GC_ASSERT(j == heap_allocated_pages);
2175 }
2176}
2177
2178static struct heap_page_body *
2179heap_page_body_allocate(void)
2180{
2181 struct heap_page_body *page_body;
2182
2183 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2184#ifdef HAVE_MMAP
2185 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
2186
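        /* mmap() only guarantees OS page alignment, so to obtain a
         * HEAP_PAGE_ALIGN-aligned body the code below over-allocates by
         * HEAP_PAGE_ALIGN, rounds the returned pointer up to the next
         * alignment boundary, and munmaps the unused head and tail pieces. */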
2187 char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
2188 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2189 if (ptr == MAP_FAILED) {
2190 return NULL;
2191 }
2192
2193 char *aligned = ptr + HEAP_PAGE_ALIGN;
2194 aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
2195 GC_ASSERT(aligned > ptr);
2196 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
2197
2198 size_t start_out_of_range_size = aligned - ptr;
2199 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2200 if (start_out_of_range_size > 0) {
2201 if (munmap(ptr, start_out_of_range_size)) {
2202 rb_bug("heap_page_body_allocate: munmap failed for start");
2203 }
2204 }
2205
2206 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
2207 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2208 if (end_out_of_range_size > 0) {
2209 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
2210 rb_bug("heap_page_body_allocate: munmap failed for end");
2211 }
2212 }
2213
2214 page_body = (struct heap_page_body *)aligned;
2215#endif
2216 }
2217 else {
2218 page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
2219 }
2220
2221 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2222
2223 return page_body;
2224}
2225
2226static struct heap_page *
2227heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2228{
2229 uintptr_t start, end, p;
2230 struct heap_page *page;
2231 uintptr_t hi, lo, mid;
2232 size_t stride = size_pool->slot_size;
2233 unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
2234
2235 /* assign heap_page body (contains heap_page_header and RVALUEs) */
2236 struct heap_page_body *page_body = heap_page_body_allocate();
2237 if (page_body == 0) {
2238 rb_memerror();
2239 }
2240
2241 /* assign heap_page entry */
2242 page = calloc1(sizeof(struct heap_page));
2243 if (page == 0) {
2244 heap_page_body_free(page_body);
2245 rb_memerror();
2246 }
2247
2248 /* adjust obj_limit (object number available in this page) */
2249 start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
2250
2251 if (start % BASE_SLOT_SIZE != 0) {
2252 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
2253 start = start + delta;
2254 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
2255
2256 /* Find a num in page that is evenly divisible by `stride`.
2257 * This is to ensure that objects are aligned with bit planes.
2258 * In other words, ensure there are an even number of objects
2259 * per bit plane. */
2260 if (NUM_IN_PAGE(start) == 1) {
2261 start += stride - BASE_SLOT_SIZE;
2262 }
2263
2264 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
2265
2266 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2267 }
2268 end = start + (limit * (int)stride);
2269
2270 /* setup heap_pages_sorted */
2271 lo = 0;
2272 hi = (uintptr_t)heap_allocated_pages;
2273 while (lo < hi) {
2274 struct heap_page *mid_page;
2275
2276 mid = (lo + hi) / 2;
2277 mid_page = heap_pages_sorted[mid];
2278 if ((uintptr_t)mid_page->start < start) {
2279 lo = mid + 1;
2280 }
2281 else if ((uintptr_t)mid_page->start > start) {
2282 hi = mid;
2283 }
2284 else {
2285 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2286 }
2287 }
2288
2289 if (hi < (uintptr_t)heap_allocated_pages) {
2290 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2291 }
2292
2293 heap_pages_sorted[hi] = page;
2294
2295 heap_allocated_pages++;
2296
2297 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2298 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2299 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2300
2301 size_pool->total_allocated_pages++;
2302
2303 if (heap_allocated_pages > heap_pages_sorted_length) {
2304 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2305 heap_allocated_pages, heap_pages_sorted_length);
2306 }
2307
2308 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2309 if (heap_pages_himem < end) heap_pages_himem = end;
2310
2311 page->start = start;
2312 page->total_slots = limit;
2313 page->slot_size = size_pool->slot_size;
2314 page->size_pool = size_pool;
2315 page_body->header.page = page;
2316
2317 for (p = start; p != end; p += stride) {
2318 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2319 heap_page_add_freeobj(objspace, page, (VALUE)p);
2320 }
2321 page->free_slots = limit;
2322
2323 asan_lock_freelist(page);
2324 return page;
2325}
2326
2327static struct heap_page *
2328heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2329{
2330 struct heap_page *page = 0, *next;
2331
2332 ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2333 asan_unlock_freelist(page);
2334 if (page->freelist != NULL) {
2335 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2336 asan_lock_freelist(page);
2337 return page;
2338 }
2339 }
2340
2341 return NULL;
2342}
2343
2344static struct heap_page *
2345heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2346{
2347 struct heap_page *page;
2348 const char *method = "recycle";
2349
2350 size_pool->allocatable_pages--;
2351
2352 page = heap_page_resurrect(objspace, size_pool);
2353
2354 if (page == NULL) {
2355 page = heap_page_allocate(objspace, size_pool);
2356 method = "allocate";
2357 }
2358 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2359 "heap_pages_sorted_length: %"PRIdSIZE", "
2360 "heap_allocated_pages: %"PRIdSIZE", "
2361 "tomb->total_pages: %"PRIdSIZE"\n",
2362 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2363 return page;
2364}
2365
2366static void
2367heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
2368{
2369 /* Adding to eden heap during incremental sweeping is forbidden */
2370 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2371 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2372 ccan_list_add_tail(&heap->pages, &page->page_node);
2373 heap->total_pages++;
2374 heap->total_slots += page->total_slots;
2375}
2376
2377static void
2378heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2379{
2380 struct heap_page *page = heap_page_create(objspace, size_pool);
2381 heap_add_page(objspace, size_pool, heap, page);
2382 heap_add_freepage(heap, page);
2383}
2384
2385#if GC_CAN_COMPILE_COMPACTION
2386static void
2387heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
2388{
2389 size_t i;
2390
2391 size_pool_allocatable_pages_set(objspace, size_pool, add);
2392
2393 for (i = 0; i < add; i++) {
2394 heap_assign_page(objspace, size_pool, heap);
2395 }
2396
2397 GC_ASSERT(size_pool->allocatable_pages == 0);
2398}
2399#endif
2400
2401static size_t
2402slots_to_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t slots)
2403{
2404 size_t multiple = size_pool->slot_size / BASE_SLOT_SIZE;
2405 /* Due to alignment, heap pages may have one less slot. We should
2406 * ensure there are enough pages to guarantee that we will have at
2407 * least the required number of slots after allocating all the pages. */
2408 size_t slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1;
2409 return CEILDIV(slots, slots_per_page);
2410}
2411
2412static size_t
2413minimum_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2414{
2415 size_t size_pool_idx = size_pool - size_pools;
2416 size_t init_slots = gc_params.size_pool_init_slots[size_pool_idx];
2417 return slots_to_pages_for_size_pool(objspace, size_pool, init_slots);
2418}
2419
2420static size_t
2421heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots, size_t used)
2422{
2423 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2424 size_t next_used;
2425
2426 if (goal_ratio == 0.0) {
2427 next_used = (size_t)(used * gc_params.growth_factor);
2428 }
2429 else if (total_slots == 0) {
2430 next_used = minimum_pages_for_size_pool(objspace, size_pool);
2431 }
2432 else {
2433 /* Find `f' where free_slots = f * total_slots * goal_ratio
2434 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
2435 */
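        /* Worked example (illustrative numbers): with total_slots = 1000,
         * free_slots = 200 and goal_ratio = 0.4, the live slots are 800 and
         * f = 800 / (0.6 * 1000) = 1.33, i.e. grow to ~1333 slots so that
         * about 40% of the new capacity is free once the 800 live slots are
         * accounted for. */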
2436 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2437
2438 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2439 if (f < 1.0) f = 1.1;
2440
2441 next_used = (size_t)(f * used);
2442
2443 if (0) {
2444 fprintf(stderr,
2445 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
2446 " G(%1.2f), f(%1.2f),"
2447 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
2448 free_slots, total_slots, free_slots/(double)total_slots,
2449 goal_ratio, f, used, next_used);
2450 }
2451 }
2452
2453 if (gc_params.growth_max_slots > 0) {
2454 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2455 if (next_used > max_used) next_used = max_used;
2456 }
2457
2458 size_t extend_page_count = next_used - used;
2459 /* Extend by at least 1 page. */
2460 if (extend_page_count == 0) extend_page_count = 1;
2461
2462 return extend_page_count;
2463}
2464
2465static int
2466heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2467{
2468 if (size_pool->allocatable_pages > 0) {
2469 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2470 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2471 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2472
2473 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2474 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2475
2476 heap_assign_page(objspace, size_pool, heap);
2477 return TRUE;
2478 }
2479 return FALSE;
2480}
2481
2482static void
2483gc_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2484{
2485 unsigned int lock_lev;
2486 gc_enter(objspace, gc_enter_event_continue, &lock_lev);
2487
2488 /* Continue marking if in incremental marking. */
2489 if (is_incremental_marking(objspace)) {
2490 if (gc_marks_continue(objspace, size_pool, heap)) {
2491 gc_sweep(objspace);
2492 }
2493 }
2494
2495 /* Continue sweeping if in lazy sweeping or the previous incremental
2496 * marking finished and did not yield a free page. */
2497 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2498 gc_sweep_continue(objspace, size_pool, heap);
2499 }
2500
2501 gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2502}
2503
2504static void
2505heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2506{
2507 GC_ASSERT(heap->free_pages == NULL);
2508
2509 /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2510 gc_continue(objspace, size_pool, heap);
2511
2512 /* If we still don't have a free page and are not allowed to create a new page,
2513 * we should start a new GC cycle. */
2514 if (heap->free_pages == NULL &&
2515 (will_be_incremental_marking(objspace) ||
2516 (heap_increment(objspace, size_pool, heap) == FALSE))) {
2517 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2518 rb_memerror();
2519 }
2520 else {
2521 /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2522 gc_continue(objspace, size_pool, heap);
2523
2524 /* If we're not incremental marking (e.g. a minor GC) or finished
2525 * sweeping and still don't have a free page, then
2526 * gc_sweep_finish_size_pool should allow us to create a new page. */
2527 if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
2528 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
2529 rb_bug("cannot create a new page after GC");
2530 }
2531 else { // Major GC is required, which will allow us to create new page
2532 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2533 rb_memerror();
2534 }
2535 else {
2536 /* Do steps of incremental marking or lazy sweeping. */
2537 gc_continue(objspace, size_pool, heap);
2538
2539 if (heap->free_pages == NULL &&
2540 !heap_increment(objspace, size_pool, heap)) {
2541 rb_bug("cannot create a new page after major GC");
2542 }
2543 }
2544 }
2545 }
2546 }
2547 }
2548
2549 GC_ASSERT(heap->free_pages != NULL);
2550}
2551
2552void
2553rb_objspace_set_event_hook(const rb_event_flag_t event)
2554{
2555 rb_objspace_t *objspace = &rb_objspace;
2556 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2557 objspace->flags.has_newobj_hook = !!(objspace->hook_events & RUBY_INTERNAL_EVENT_NEWOBJ);
2558}
2559
2560static void
2561gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2562{
2563 if (UNLIKELY(!ec->cfp)) return;
2564 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2565}
2566
2567#define gc_event_newobj_hook_needed_p(objspace) ((objspace)->flags.has_newobj_hook)
2568#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2569
2570#define gc_event_hook_prep(objspace, event, data, prep) do { \
2571 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2572 prep; \
2573 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2574 } \
2575} while (0)
2576
2577#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2578
2579static inline VALUE
2580newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2581{
2582#if !__has_feature(memory_sanitizer)
2583 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2584 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2585#endif
2586 RVALUE *p = RANY(obj);
2587 p->as.basic.flags = flags;
2588 *((VALUE *)&p->as.basic.klass) = klass;
2589
2590 int t = flags & RUBY_T_MASK;
2591 if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
2592 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
2593 }
2594
2595#if RACTOR_CHECK_MODE
2596 rb_ractor_setup_belonging(obj);
2597#endif
2598
2599#if RGENGC_CHECK_MODE
2600 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2601
2602 RB_VM_LOCK_ENTER_NO_BARRIER();
2603 {
2604 check_rvalue_consistency(obj);
2605
2606 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2607 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2608 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2609 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2610
2611 if (RVALUE_REMEMBERED((VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2612 }
2613 RB_VM_LOCK_LEAVE_NO_BARRIER();
2614#endif
2615
2616 if (UNLIKELY(wb_protected == FALSE)) {
2617 ASSERT_vm_locking();
2618 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2619 }
2620
2621#if RGENGC_PROFILE
2622 if (wb_protected) {
2623 objspace->profile.total_generated_normal_object_count++;
2624#if RGENGC_PROFILE >= 2
2625 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2626#endif
2627 }
2628 else {
2629 objspace->profile.total_generated_shady_object_count++;
2630#if RGENGC_PROFILE >= 2
2631 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2632#endif
2633 }
2634#endif
2635
2636#if GC_DEBUG
2637 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2638 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2639#endif
2640
2641 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2642
2643 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2644 return obj;
2645}
2646
2647size_t
2648rb_gc_obj_slot_size(VALUE obj)
2649{
2650 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2651}
2652
2653static inline size_t
2654size_pool_slot_size(unsigned char pool_id)
2655{
2656 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2657
2658 size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2659
2660#if RGENGC_CHECK_MODE
2661 rb_objspace_t *objspace = &rb_objspace;
2662 GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2663#endif
2664
2665 slot_size -= RVALUE_OVERHEAD;
2666
2667 return slot_size;
2668}
2669
2670size_t
2671rb_size_pool_slot_size(unsigned char pool_id)
2672{
2673 return size_pool_slot_size(pool_id);
2674}
2675
2676bool
2677rb_gc_size_allocatable_p(size_t size)
2678{
2679 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2680}
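/* Size pool layout sketch (assuming the usual 64-bit BASE_SLOT_SIZE of 40
 * bytes and SIZE_POOL_COUNT of 5): the pools provide raw slots of 40, 80,
 * 160, 320 and 640 bytes, the usable size of each being the slot size minus
 * RVALUE_OVERHEAD, so rb_gc_size_allocatable_p() accepts any request up to
 * the largest pool's usable size. */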
2681
2682static inline VALUE
2683ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2684 size_t size_pool_idx)
2685{
2686 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2687 RVALUE *p = size_pool_cache->freelist;
2688
2689 if (is_incremental_marking(objspace)) {
2690 // Not allowed to allocate without running an incremental marking step
2691 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2692 return Qfalse;
2693 }
2694
2695 if (p) {
2696 cache->incremental_mark_step_allocated_slots++;
2697 }
2698 }
2699
2700 if (p) {
2701 VALUE obj = (VALUE)p;
2702 MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
2703 size_pool_cache->freelist = p->as.free.next;
2704 asan_unpoison_memory_region(p, stride, true);
2705#if RGENGC_CHECK_MODE
2706 GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
2707 // zero clear
2708 MEMZERO((char *)obj, char, stride);
2709#endif
2710 return obj;
2711 }
2712 else {
2713 return Qfalse;
2714 }
2715}
2716
2717static struct heap_page *
2718heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2719{
2720 ASSERT_vm_locking();
2721
2722 struct heap_page *page;
2723
2724 if (heap->free_pages == NULL) {
2725 heap_prepare(objspace, size_pool, heap);
2726 }
2727
2728 page = heap->free_pages;
2729 heap->free_pages = page->free_next;
2730
2731 GC_ASSERT(page->free_slots != 0);
2732 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2733
2734 asan_unlock_freelist(page);
2735
2736 return page;
2737}
2738
2739static inline void
2740ractor_cache_set_page(rb_ractor_newobj_cache_t *cache, size_t size_pool_idx,
2741 struct heap_page *page)
2742{
2743 gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2744
2745 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2746
2747 GC_ASSERT(size_pool_cache->freelist == NULL);
2748 GC_ASSERT(page->free_slots != 0);
2749 GC_ASSERT(page->freelist != NULL);
2750
2751 size_pool_cache->using_page = page;
2752 size_pool_cache->freelist = page->freelist;
2753 page->free_slots = 0;
2754 page->freelist = NULL;
2755
2756 asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
2757 GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
2758 asan_poison_object((VALUE)size_pool_cache->freelist);
2759}
2760
2761static inline VALUE
2762newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2763{
2764 RVALUE *p = (RVALUE *)obj;
2765 p->as.values.v1 = v1;
2766 p->as.values.v2 = v2;
2767 p->as.values.v3 = v3;
2768 return obj;
2769}
2770
2771static inline size_t
2772size_pool_idx_for_size(size_t size)
2773{
2774 size += RVALUE_OVERHEAD;
2775
2776 size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2777
2778 /* size_pool_idx is ceil(log2(slot_count)) */
2779 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
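    /* Example (illustrative, assuming a 64-bit BASE_SLOT_SIZE of 40 bytes):
     * once RVALUE_OVERHEAD has been added, a 100-byte request gives
     * slot_count = CEILDIV(100, 40) = 3 and size_pool_idx = 64 - nlz_int64(2)
     * = 2, i.e. the 4 * BASE_SLOT_SIZE (160-byte) pool, matching
     * ceil(log2(3)). */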
2780
2781 if (size_pool_idx >= SIZE_POOL_COUNT) {
2782 rb_bug("size_pool_idx_for_size: allocation size too large "
2783 "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
2784 }
2785
2786#if RGENGC_CHECK_MODE
2787 rb_objspace_t *objspace = &rb_objspace;
2788 GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
2789 if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
2790#endif
2791
2792 return size_pool_idx;
2793}
2794
2795static VALUE
2796newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, bool vm_locked)
2797{
2798 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
2799 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2800 rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;
2801
2802 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2803
2804 if (UNLIKELY(obj == Qfalse)) {
2805 unsigned int lev;
2806 bool unlock_vm = false;
2807
2808 if (!vm_locked) {
2809 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2810 vm_locked = true;
2811 unlock_vm = true;
2812 }
2813
2814 {
2815 ASSERT_vm_locking();
2816
2817 if (is_incremental_marking(objspace)) {
2818 gc_continue(objspace, size_pool, heap);
2819 cache->incremental_mark_step_allocated_slots = 0;
2820
2821 // Retry allocation after resetting incremental_mark_step_allocated_slots
2822 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2823 }
2824
2825 if (obj == Qfalse) {
2826 // Get next free page (possibly running GC)
2827 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2828 ractor_cache_set_page(cache, size_pool_idx, page);
2829
2830 // Retry allocation after moving to new page
2831 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2832
2833 GC_ASSERT(obj != Qfalse);
2834 }
2835 }
2836
2837 if (unlock_vm) {
2838 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2839 }
2840 }
2841
2842 size_pool->total_allocated_objects++;
2843
2844 return obj;
2845}
2846
2847static void
2848newobj_zero_slot(VALUE obj)
2849{
2850 memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
2851}
2852
2853ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2854
2855static inline VALUE
2856newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2857{
2858 VALUE obj;
2859 unsigned int lev;
2860
2861 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2862 {
2863 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2864 if (during_gc) {
2865 dont_gc_on();
2866 during_gc = 0;
2867 rb_bug("object allocation during garbage collection phase");
2868 }
2869
2870 if (ruby_gc_stressful) {
2871 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2872 rb_memerror();
2873 }
2874 }
2875 }
2876
2877 obj = newobj_alloc(objspace, cr, size_pool_idx, true);
2878 newobj_init(klass, flags, wb_protected, objspace, obj);
2879
2880 gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_zero_slot(obj));
2881 }
2882 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2883
2884 return obj;
2885}
2886
2887NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2888 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2889NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2890 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2891
2892static VALUE
2893newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2894{
2895 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2896}
2897
2898static VALUE
2899newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2900{
2901 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2902}
2903
2904static inline VALUE
2905newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2906{
2907 VALUE obj;
2908 rb_objspace_t *objspace = &rb_objspace;
2909
2910 RB_DEBUG_COUNTER_INC(obj_newobj);
2911 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2912
2913 if (UNLIKELY(stress_to_class)) {
2914 long i, cnt = RARRAY_LEN(stress_to_class);
2915 for (i = 0; i < cnt; ++i) {
2916 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2917 }
2918 }
2919
2920 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2921
2922 if (SHAPE_IN_BASIC_FLAGS || (flags & RUBY_T_MASK) == T_OBJECT) {
2923 flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
2924 }
2925
2926 if (!UNLIKELY(during_gc ||
2927 ruby_gc_stressful ||
2928 gc_event_newobj_hook_needed_p(objspace)) &&
2929 wb_protected) {
2930 obj = newobj_alloc(objspace, cr, size_pool_idx, false);
2931 newobj_init(klass, flags, wb_protected, objspace, obj);
2932 }
2933 else {
2934 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2935
2936 obj = wb_protected ?
2937 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2938 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2939 }
2940
2941 return obj;
2942}
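/* Allocation path summary: when no GC is in progress, GC stress is off, no
 * NEWOBJ hook is registered and the object is write-barrier protected,
 * newobj_alloc() runs without taking the VM lock and usually serves the slot
 * straight from the per-ractor freelist cache (it only locks on a cache
 * miss); every other case takes the slow path, which re-checks the state
 * while holding the VM lock. */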
2943
2944static inline VALUE
2945newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2946{
2947 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2948 return newobj_fill(obj, v1, v2, v3);
2949}
2950
2951VALUE
2952rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2953{
2954 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2955 return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
2956}
2957
2958VALUE
2959rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
2960{
2961 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2962 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2963}
2964
2965/* for compatibility */
2966
2967VALUE
2968rb_newobj(void)
2969{
2970 return newobj_of(GET_RACTOR(), 0, T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
2971}
2972
2973static VALUE
2974rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
2975{
2976 GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
2977 GC_ASSERT(flags & ROBJECT_EMBED);
2978
2979 size_t size;
2980 uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
2981
2982 size = rb_obj_embedded_size(index_tbl_num_entries);
2983 if (!rb_gc_size_allocatable_p(size)) {
2984 size = sizeof(struct RObject);
2985 }
2986
2987 VALUE obj = newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, wb_protected, size);
2988 RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT);
2989
2990 // Set the shape to the specific T_OBJECT shape which is always
2991 // SIZE_POOL_COUNT away from the root shape.
2992 ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
2993
2994#if RUBY_DEBUG
2995 RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
2996 VALUE *ptr = ROBJECT_IVPTR(obj);
2997 for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
2998 ptr[i] = Qundef;
2999 }
3000#endif
3001
3002 return obj;
3003}
3004
3005VALUE
3006rb_newobj_of(VALUE klass, VALUE flags)
3007{
3008 if ((flags & RUBY_T_MASK) == T_OBJECT) {
3009 return rb_class_instance_allocate_internal(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED, flags & FL_WB_PROTECTED);
3010 }
3011 else {
3012 return newobj_of(GET_RACTOR(), klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, RVALUE_SIZE);
3013 }
3014}
3015
3016#define UNEXPECTED_NODE(func) \
3017 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
3018 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
3019
3020const char *
3021rb_imemo_name(enum imemo_type type)
3022{
3023 // put no default case to get a warning if an imemo type is missing
3024 switch (type) {
3025#define IMEMO_NAME(x) case imemo_##x: return #x;
3026 IMEMO_NAME(env);
3027 IMEMO_NAME(cref);
3028 IMEMO_NAME(svar);
3029 IMEMO_NAME(throw_data);
3030 IMEMO_NAME(ifunc);
3031 IMEMO_NAME(memo);
3032 IMEMO_NAME(ment);
3033 IMEMO_NAME(iseq);
3034 IMEMO_NAME(tmpbuf);
3035 IMEMO_NAME(ast);
3036 IMEMO_NAME(parser_strterm);
3037 IMEMO_NAME(callinfo);
3038 IMEMO_NAME(callcache);
3039 IMEMO_NAME(constcache);
3040#undef IMEMO_NAME
3041 }
3042 return "unknown";
3043}
3044
3045#undef rb_imemo_new
3046
3047VALUE
3048rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3049{
3050 size_t size = RVALUE_SIZE;
3051 VALUE flags = T_IMEMO | (type << FL_USHIFT);
3052 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, TRUE, size);
3053}
3054
3055static VALUE
3056rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3057{
3058 size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
3059 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
3060 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, FALSE, size);
3061}
3062
3063static VALUE
3064rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
3065{
3066 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
3067}
3068
3069rb_imemo_tmpbuf_t *
3070rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
3071{
3072 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
3073}
3074
3075static size_t
3076imemo_memsize(VALUE obj)
3077{
3078 size_t size = 0;
3079 switch (imemo_type(obj)) {
3080 case imemo_ment:
3081 size += sizeof(RANY(obj)->as.imemo.ment.def);
3082 break;
3083 case imemo_iseq:
3084 size += rb_iseq_memsize((rb_iseq_t *)obj);
3085 break;
3086 case imemo_env:
3087 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
3088 break;
3089 case imemo_tmpbuf:
3090 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3091 break;
3092 case imemo_ast:
3093 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3094 break;
3095 case imemo_cref:
3096 case imemo_svar:
3097 case imemo_throw_data:
3098 case imemo_ifunc:
3099 case imemo_memo:
3100 case imemo_parser_strterm:
3101 break;
3102 default:
3103 /* unreachable */
3104 break;
3105 }
3106 return size;
3107}
3108
3109#if IMEMO_DEBUG
3110VALUE
3111rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
3112{
3113 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
3114 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
3115 return memo;
3116}
3117#endif
3118
3119VALUE
3120rb_class_allocate_instance(VALUE klass)
3121{
3122 return rb_class_instance_allocate_internal(klass, T_OBJECT | ROBJECT_EMBED, RGENGC_WB_PROTECTED_OBJECT);
3123}
3124
3125static inline void
3126rb_data_object_check(VALUE klass)
3127{
3128 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
3129 rb_undef_alloc_func(klass);
3130 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3131 }
3132}
3133
3134VALUE
3135rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3136{
3138 if (klass) rb_data_object_check(klass);
3139 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
3140}
3141
3142VALUE
3143rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3144{
3145 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3146 DATA_PTR(obj) = xcalloc(1, size);
3147 return obj;
3148}
3149
3150static VALUE
3151typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
3152{
3153 RBIMPL_NONNULL_ARG(type);
3154 if (klass) rb_data_object_check(klass);
3155 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
3156 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
3157}
3158
3159VALUE
3160rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
3161{
3162 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
3163 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
3164 }
3165
3166 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
3167}
3168
3169VALUE
3170rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
3171{
3172 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
3173 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
3174 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
3175 }
3176
3177 size_t embed_size = offsetof(struct RTypedData, data) + size;
3178 if (rb_gc_size_allocatable_p(embed_size)) {
3179 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
3180 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
3181 return obj;
3182 }
3183 }
3184
3185 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
3186 DATA_PTR(obj) = xcalloc(1, size);
3187 return obj;
3188}
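/* Usage sketch (illustrative; `point_type' and `struct point' are invented
 * names, not part of this file): an extension that wants its struct embedded
 * directly in the object slot marks its type RUBY_TYPED_FREE_IMMEDIATELY and
 * RUBY_TYPED_EMBEDDABLE, e.g.
 *
 *     static const rb_data_type_t point_type = {
 *         "point",
 *         {0, RUBY_TYPED_DEFAULT_FREE, 0},
 *         0, 0,
 *         RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE,
 *     };
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct point), &point_type);
 *
 * When offsetof(struct RTypedData, data) + size fits in a size pool, the data
 * is stored inside the slot itself; otherwise the code above falls back to a
 * separately xcalloc'ed buffer. */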
3189
3190size_t
3191rb_objspace_data_type_memsize(VALUE obj)
3192{
3193 size_t size = 0;
3194 if (RTYPEDDATA_P(obj)) {
3195 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
3196 const void *ptr = RTYPEDDATA_GET_DATA(obj);
3197
3198 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3199#ifdef HAVE_MALLOC_USABLE_SIZE
3200 size += malloc_usable_size((void *)ptr);
3201#endif
3202 }
3203
3204 if (ptr && type->function.dsize) {
3205 size += type->function.dsize(ptr);
3206 }
3207 }
3208
3209 return size;
3210}
3211
3212const char *
3213rb_objspace_data_type_name(VALUE obj)
3214{
3215 if (RTYPEDDATA_P(obj)) {
3216 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
3217 }
3218 else {
3219 return 0;
3220 }
3221}
3222
3223static int
3224ptr_in_page_body_p(const void *ptr, const void *memb)
3225{
3226 struct heap_page *page = *(struct heap_page **)memb;
3227 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3228
3229 if ((uintptr_t)ptr >= p_body) {
3230 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3231 }
3232 else {
3233 return -1;
3234 }
3235}
3236
3237PUREFUNC(static inline struct heap_page * heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
3238static inline struct heap_page *
3239heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
3240{
3241 struct heap_page **res;
3242
3243 if (ptr < (uintptr_t)heap_pages_lomem ||
3244 ptr > (uintptr_t)heap_pages_himem) {
3245 return NULL;
3246 }
3247
3248 res = bsearch((void *)ptr, heap_pages_sorted,
3249 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3250 ptr_in_page_body_p);
3251
3252 if (res) {
3253 return *res;
3254 }
3255 else {
3256 return NULL;
3257 }
3258}
3259
3260PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3261static inline int
3262is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
3263{
3264 register uintptr_t p = (uintptr_t)ptr;
3265 register struct heap_page *page;
3266
3267 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3268
3269 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3270 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3271
3272 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3273 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3274
3275 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3276 if (page) {
3277 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3278 if (page->flags.in_tomb) {
3279 return FALSE;
3280 }
3281 else {
3282 if (p < page->start) return FALSE;
3283 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3284 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3285
3286 return TRUE;
3287 }
3288 }
3289 return FALSE;
3290}
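/* is_pointer_to_heap() is what makes conservative stack scanning safe: an
 * arbitrary word is treated as a possible object reference only if it falls
 * within [heap_pages_lomem, heap_pages_himem), is BASE_SLOT_SIZE-aligned,
 * maps (via binary search over heap_pages_sorted) to a live, non-tomb page,
 * and lands exactly on a slot boundary of that page. */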
3291
3292static enum rb_id_table_iterator_result
3293free_const_entry_i(VALUE value, void *data)
3294{
3295 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3296 xfree(ce);
3297 return ID_TABLE_CONTINUE;
3298}
3299
3300void
3301rb_free_const_table(struct rb_id_table *tbl)
3302{
3303 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3304 rb_id_table_free(tbl);
3305}
3306
3307// alive: if false, the target pointers may already have been freed.
3308// To check that, we need the objspace parameter.
3309static void
3310vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
3311{
3312 if (ccs->entries) {
3313 for (int i=0; i<ccs->len; i++) {
3314 const struct rb_callcache *cc = ccs->entries[i].cc;
3315 if (!alive) {
3316 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3317 // ccs can be freed already.
3318 if (is_pointer_to_heap(objspace, (void *)cc) &&
3319 IMEMO_TYPE_P(cc, imemo_callcache) &&
3320 cc->klass == klass) {
3321 // OK. maybe target cc.
3322 }
3323 else {
3324 if (ptr) {
3325 asan_poison_object((VALUE)cc);
3326 }
3327 continue;
3328 }
3329 if (ptr) {
3330 asan_poison_object((VALUE)cc);
3331 }
3332 }
3333
3334 VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
3335 vm_cc_invalidate(cc);
3336 }
3337 ruby_xfree(ccs->entries);
3338 }
3339 ruby_xfree(ccs);
3340}
3341
3342void
3343rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
3344{
3345 RB_DEBUG_COUNTER_INC(ccs_free);
3346 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3347}
3348
3349struct cc_tbl_i_data {
3350 rb_objspace_t *objspace;
3351 VALUE klass;
3352 bool alive;
3353};
3354
3355static enum rb_id_table_iterator_result
3356cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3357{
3358 struct cc_tbl_i_data *data = data_ptr;
3359 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3360 VM_ASSERT(vm_ccs_p(ccs));
3361 VM_ASSERT(id == ccs->cme->called_id);
3362
3363 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3364 rb_vm_ccs_free(ccs);
3365 return ID_TABLE_DELETE;
3366 }
3367 else {
3368 gc_mark(data->objspace, (VALUE)ccs->cme);
3369
3370 for (int i=0; i<ccs->len; i++) {
3371 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3372 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3373
3374 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3375 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3376 }
3377 return ID_TABLE_CONTINUE;
3378 }
3379}
3380
3381static void
3382cc_table_mark(rb_objspace_t *objspace, VALUE klass)
3383{
3384 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3385 if (cc_tbl) {
3386 struct cc_tbl_i_data data = {
3387 .objspace = objspace,
3388 .klass = klass,
3389 };
3390 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3391 }
3392}
3393
3394static enum rb_id_table_iterator_result
3395cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3396{
3397 struct cc_tbl_i_data *data = data_ptr;
3398 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3399 VM_ASSERT(vm_ccs_p(ccs));
3400 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3401 return ID_TABLE_CONTINUE;
3402}
3403
3404static void
3405cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
3406{
3407 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3408
3409 if (cc_tbl) {
3410 struct cc_tbl_i_data data = {
3411 .objspace = objspace,
3412 .klass = klass,
3413 .alive = alive,
3414 };
3415 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3416 rb_id_table_free(cc_tbl);
3417 }
3418}
3419
3420static enum rb_id_table_iterator_result
3421cvar_table_free_i(VALUE value, void * ctx)
3422{
3423 xfree((void *) value);
3424 return ID_TABLE_CONTINUE;
3425}
3426
3427void
3428rb_cc_table_free(VALUE klass)
3429{
3430 cc_table_free(&rb_objspace, klass, TRUE);
3431}
3432
3433static inline void
3434make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3435{
3436 struct RZombie *zombie = RZOMBIE(obj);
3437 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
3438 zombie->dfree = dfree;
3439 zombie->data = data;
3440 VALUE prev, next = heap_pages_deferred_final;
3441 do {
3442 zombie->next = prev = next;
3443 next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
3444 } while (next != prev);
3445
3446 struct heap_page *page = GET_HEAP_PAGE(obj);
3447 page->final_slots++;
3448 heap_pages_final_slots++;
3449}
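/* make_zombie() defers the actual dfree/finalizer work: the slot is retyped
 * to T_ZOMBIE and pushed onto the global heap_pages_deferred_final list with
 * a compare-and-swap retry loop, so a concurrent update of the list head
 * cannot lose entries.  The preregistered gc_finalize_deferred job later
 * drains the list and runs the finalizers. */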
3450
3451static inline void
3452make_io_zombie(rb_objspace_t *objspace, VALUE obj)
3453{
3454 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3455 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3456}
3457
3458static void
3459obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
3460{
3461 ASSERT_vm_locking();
3462 st_data_t o = (st_data_t)obj, id;
3463
3464 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3465 FL_UNSET(obj, FL_SEEN_OBJ_ID);
3466
3467 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3468 GC_ASSERT(id);
3469 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3470 }
3471 else {
3472 rb_bug("Object ID seen, but not in mapping table: %s", obj_info(obj));
3473 }
3474}
3475
3476static bool
3477rb_data_free(rb_objspace_t *objspace, VALUE obj)
3478{
3479 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3480 if (data) {
3481 int free_immediately = false;
3482 void (*dfree)(void *);
3483
3484 if (RTYPEDDATA_P(obj)) {
3485 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3486 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3487 }
3488 else {
3489 dfree = RANY(obj)->as.data.dfree;
3490 }
3491
3492 if (dfree) {
3493 if (dfree == RUBY_DEFAULT_FREE) {
3494 if (!RTYPEDDATA_EMBEDDED_P(obj)) {
3495 xfree(data);
3496 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3497 }
3498 }
3499 else if (free_immediately) {
3500 (*dfree)(data);
3501 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3502 xfree(data);
3503 }
3504
3505 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3506 }
3507 else {
3508 make_zombie(objspace, obj, dfree, data);
3509 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3510 return FALSE;
3511 }
3512 }
3513 else {
3514 RB_DEBUG_COUNTER_INC(obj_data_empty);
3515 }
3516 }
3517
3518 return true;
3519}
3520
3521static int
3522obj_free(rb_objspace_t *objspace, VALUE obj)
3523{
3524 RB_DEBUG_COUNTER_INC(obj_free);
3525 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3526
3527 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
3528
3529 switch (BUILTIN_TYPE(obj)) {
3530 case T_NIL:
3531 case T_FIXNUM:
3532 case T_TRUE:
3533 case T_FALSE:
3534 rb_bug("obj_free() called for broken object");
3535 break;
3536 default:
3537 break;
3538 }
3539
3540 if (FL_TEST(obj, FL_EXIVAR)) {
3541 rb_free_generic_ivar((VALUE)obj);
3542 FL_UNSET(obj, FL_EXIVAR);
3543 }
3544
3545 if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
3546 obj_free_object_id(objspace, obj);
3547 }
3548
3549 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3550
3551#if RGENGC_CHECK_MODE
3552#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3553 CHECK(RVALUE_WB_UNPROTECTED);
3554 CHECK(RVALUE_MARKED);
3555 CHECK(RVALUE_MARKING);
3556 CHECK(RVALUE_UNCOLLECTIBLE);
3557#undef CHECK
3558#endif
3559
3560 switch (BUILTIN_TYPE(obj)) {
3561 case T_OBJECT:
3562 if (rb_shape_obj_too_complex(obj)) {
3563 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3564 st_free_table(ROBJECT_IV_HASH(obj));
3565 }
3566 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3567 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3568 }
3569 else {
3570 xfree(RANY(obj)->as.object.as.heap.ivptr);
3571 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3572 }
3573 break;
3574 case T_MODULE:
3575 case T_CLASS:
3576 rb_id_table_free(RCLASS_M_TBL(obj));
3577 cc_table_free(objspace, obj, FALSE);
3578 if (rb_shape_obj_too_complex(obj)) {
3579 st_free_table((st_table *)RCLASS_IVPTR(obj));
3580 }
3581 else if (RCLASS_IVPTR(obj)) {
3582 xfree(RCLASS_IVPTR(obj));
3583 }
3584
3585 if (RCLASS_CONST_TBL(obj)) {
3586 rb_free_const_table(RCLASS_CONST_TBL(obj));
3587 }
3588 if (RCLASS_CVC_TBL(obj)) {
3589 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3590 rb_id_table_free(RCLASS_CVC_TBL(obj));
3591 }
3592 rb_class_remove_subclass_head(obj);
3593 rb_class_remove_from_module_subclasses(obj);
3594 rb_class_remove_from_super_subclasses(obj);
3595 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3596 xfree(RCLASS_SUPERCLASSES(obj));
3597 }
3598
3599 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
3600 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
3601 break;
3602 case T_STRING:
3603 rb_str_free(obj);
3604 break;
3605 case T_ARRAY:
3606 rb_ary_free(obj);
3607 break;
3608 case T_HASH:
3609#if USE_DEBUG_COUNTER
3610 switch (RHASH_SIZE(obj)) {
3611 case 0:
3612 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3613 break;
3614 case 1:
3615 RB_DEBUG_COUNTER_INC(obj_hash_1);
3616 break;
3617 case 2:
3618 RB_DEBUG_COUNTER_INC(obj_hash_2);
3619 break;
3620 case 3:
3621 RB_DEBUG_COUNTER_INC(obj_hash_3);
3622 break;
3623 case 4:
3624 RB_DEBUG_COUNTER_INC(obj_hash_4);
3625 break;
3626 case 5:
3627 case 6:
3628 case 7:
3629 case 8:
3630 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3631 break;
3632 default:
3633 GC_ASSERT(RHASH_SIZE(obj) > 8);
3634 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3635 }
3636
3637 if (RHASH_AR_TABLE_P(obj)) {
3638 if (RHASH_AR_TABLE(obj) == NULL) {
3639 RB_DEBUG_COUNTER_INC(obj_hash_null);
3640 }
3641 else {
3642 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3643 }
3644 }
3645 else {
3646 RB_DEBUG_COUNTER_INC(obj_hash_st);
3647 }
3648#endif
3649
3650 rb_hash_free(obj);
3651 break;
3652 case T_REGEXP:
3653 if (RANY(obj)->as.regexp.ptr) {
3654 onig_free(RANY(obj)->as.regexp.ptr);
3655 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3656 }
3657 break;
3658 case T_DATA:
3659 if (!rb_data_free(objspace, obj)) return false;
3660 break;
3661 case T_MATCH:
3662 {
3663 rb_matchext_t *rm = RMATCH_EXT(obj);
3664#if USE_DEBUG_COUNTER
3665 if (rm->regs.num_regs >= 8) {
3666 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3667 }
3668 else if (rm->regs.num_regs >= 4) {
3669 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3670 }
3671 else if (rm->regs.num_regs >= 1) {
3672 RB_DEBUG_COUNTER_INC(obj_match_under4);
3673 }
3674#endif
3675 onig_region_free(&rm->regs, 0);
3676 if (rm->char_offset)
3677 xfree(rm->char_offset);
3678
3679 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3680 }
3681 break;
3682 case T_FILE:
3683 if (RANY(obj)->as.file.fptr) {
3684 make_io_zombie(objspace, obj);
3685 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3686 return FALSE;
3687 }
3688 break;
3689 case T_RATIONAL:
3690 RB_DEBUG_COUNTER_INC(obj_rational);
3691 break;
3692 case T_COMPLEX:
3693 RB_DEBUG_COUNTER_INC(obj_complex);
3694 break;
3695 case T_MOVED:
3696 break;
3697 case T_ICLASS:
3698 /* Basically, T_ICLASS shares its method table with the module */
3699 if (RICLASS_OWNS_M_TBL_P(obj)) {
3700 /* Method table is not shared for origin iclasses of classes */
3701 rb_id_table_free(RCLASS_M_TBL(obj));
3702 }
3703 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3704 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3705 }
3706 rb_class_remove_subclass_head(obj);
3707 cc_table_free(objspace, obj, FALSE);
3708 rb_class_remove_from_module_subclasses(obj);
3709 rb_class_remove_from_super_subclasses(obj);
3710
3711 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3712 break;
3713
3714 case T_FLOAT:
3715 RB_DEBUG_COUNTER_INC(obj_float);
3716 break;
3717
3718 case T_BIGNUM:
3719 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3720 xfree(BIGNUM_DIGITS(obj));
3721 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3722 }
3723 else {
3724 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3725 }
3726 break;
3727
3728 case T_NODE:
3729 UNEXPECTED_NODE(obj_free);
3730 break;
3731
3732 case T_STRUCT:
3733 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3734 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3735 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3736 }
3737 else {
3738 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3739 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3740 }
3741 break;
3742
3743 case T_SYMBOL:
3744 {
3745 rb_gc_free_dsymbol(obj);
3746 RB_DEBUG_COUNTER_INC(obj_symbol);
3747 }
3748 break;
3749
3750 case T_IMEMO:
3751 switch (imemo_type(obj)) {
3752 case imemo_ment:
3753 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3754 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3755 break;
3756 case imemo_iseq:
3757 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3758 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3759 break;
3760 case imemo_env:
3761 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3762 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3763 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3764 break;
3765 case imemo_tmpbuf:
3766 xfree(RANY(obj)->as.imemo.alloc.ptr);
3767 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3768 break;
3769 case imemo_ast:
3770 rb_ast_free(&RANY(obj)->as.imemo.ast);
3771 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3772 break;
3773 case imemo_cref:
3774 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3775 break;
3776 case imemo_svar:
3777 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3778 break;
3779 case imemo_throw_data:
3780 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3781 break;
3782 case imemo_ifunc:
3783 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3784 break;
3785 case imemo_memo:
3786 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3787 break;
3788 case imemo_parser_strterm:
3789 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3790 break;
3791 case imemo_callinfo:
3792 {
3793 const struct rb_callinfo * ci = ((const struct rb_callinfo *)obj);
3794 if (ci->kwarg) {
3795 ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
3796 if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
3797 }
3798 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3799 break;
3800 }
3801 case imemo_callcache:
3802 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3803 break;
3804 case imemo_constcache:
3805 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3806 break;
3807 }
3808 return TRUE;
3809
3810 default:
3811 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3812 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3813 }
3814
3815 if (FL_TEST(obj, FL_FINALIZE)) {
3816 make_zombie(objspace, obj, 0, 0);
3817 return FALSE;
3818 }
3819 else {
3820 return TRUE;
3821 }
3822}
3823
3824
3825#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3826#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
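
/* Illustrative arithmetic only (an added note, not part of the original
 * source): on a typical 64-bit build where sizeof(RVALUE) == 40 (see the
 * layout comment in rb_obj_id() below), OBJ_ID_INCREMENT is 20 and
 * OBJ_ID_INITIAL is 40, so cached_object_id() hands out the monotonically
 * increasing ids 40, 60, 80, ... to heap objects. */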
3827
3828static int
3829object_id_cmp(st_data_t x, st_data_t y)
3830{
3831 if (RB_BIGNUM_TYPE_P(x)) {
3832 return !rb_big_eql(x, y);
3833 }
3834 else {
3835 return x != y;
3836 }
3837}
3838
3839static st_index_t
3840object_id_hash(st_data_t n)
3841{
3842 if (RB_BIGNUM_TYPE_P(n)) {
3843 return FIX2LONG(rb_big_hash(n));
3844 }
3845 else {
3846 return st_numhash(n);
3847 }
3848}
3849static const struct st_hash_type object_id_hash_type = {
3850 object_id_cmp,
3851 object_id_hash,
3852};
3853
3854void
3855Init_heap(void)
3856{
3857 rb_objspace_t *objspace = &rb_objspace;
3858
3859#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3860 /* Need to determine if we can use mmap at runtime. */
3861 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3862#endif
3863
3864 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3865 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3866 objspace->obj_to_id_tbl = st_init_numtable();
3867
3868#if RGENGC_ESTIMATE_OLDMALLOC
3869 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3870#endif
3871
3872    /* Set each size pool's allocatable pages. */
3873 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3874 rb_size_pool_t *size_pool = &size_pools[i];
3875
3876 /* Set the default value of size_pool_init_slots. */
3877 gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
3878
3879 size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
3880 }
3881 heap_pages_expand_sorted(objspace);
3882
3883 init_mark_stack(&objspace->mark_stack);
3884
3885 objspace->profile.invoke_time = getrusage_time();
3886 finalizer_table = st_init_numtable();
3887}
3888
3889void
3890Init_gc_stress(void)
3891{
3892 rb_objspace_t *objspace = &rb_objspace;
3893
3894 gc_stress_set(objspace, ruby_initial_gc_stress);
3895}
3896
3897typedef int each_obj_callback(void *, void *, size_t, void *);
3898typedef int each_page_callback(struct heap_page *, void *);
3899
3900static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3901static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3902
3903struct each_obj_data {
3904    rb_objspace_t *objspace;
3905 bool reenable_incremental;
3906
3907 each_obj_callback *each_obj_callback;
3908 each_page_callback *each_page_callback;
3909 void *data;
3910
3911 struct heap_page **pages[SIZE_POOL_COUNT];
3912 size_t pages_counts[SIZE_POOL_COUNT];
3913};
3914
3915static VALUE
3916objspace_each_objects_ensure(VALUE arg)
3917{
3918 struct each_obj_data *data = (struct each_obj_data *)arg;
3919 rb_objspace_t *objspace = data->objspace;
3920
3921 /* Reenable incremental GC */
3922 if (data->reenable_incremental) {
3923 objspace->flags.dont_incremental = FALSE;
3924 }
3925
3926 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3927 struct heap_page **pages = data->pages[i];
3928 free(pages);
3929 }
3930
3931 return Qnil;
3932}
3933
3934static VALUE
3935objspace_each_objects_try(VALUE arg)
3936{
3937 struct each_obj_data *data = (struct each_obj_data *)arg;
3938 rb_objspace_t *objspace = data->objspace;
3939
3940 /* Copy pages from all size_pools to their respective buffers. */
3941 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3942 rb_size_pool_t *size_pool = &size_pools[i];
3943 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3944
3945 struct heap_page **pages = malloc(size);
3946 if (!pages) rb_memerror();
3947
3948 /* Set up pages buffer by iterating over all pages in the current eden
3949 * heap. This will be a snapshot of the state of the heap before we
3950 * call the callback over each page that exists in this buffer. Thus it
3951 * is safe for the callback to allocate objects without possibly entering
3952 * an infinite loop. */
3953 struct heap_page *page = 0;
3954 size_t pages_count = 0;
3955 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3956 pages[pages_count] = page;
3957 pages_count++;
3958 }
3959 data->pages[i] = pages;
3960 data->pages_counts[i] = pages_count;
3961 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3962 }
3963
3964 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3965 rb_size_pool_t *size_pool = &size_pools[i];
3966 size_t pages_count = data->pages_counts[i];
3967 struct heap_page **pages = data->pages[i];
3968
3969 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3970 for (size_t i = 0; i < pages_count; i++) {
3971 /* If we have reached the end of the linked list then there are no
3972 * more pages, so break. */
3973 if (page == NULL) break;
3974
3975 /* If this page does not match the one in the buffer, then move to
3976 * the next page in the buffer. */
3977 if (pages[i] != page) continue;
3978
3979 uintptr_t pstart = (uintptr_t)page->start;
3980 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3981
3982 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart)) {
3983 if (data->each_obj_callback &&
3984 (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3985 break;
3986 }
3987 if (data->each_page_callback &&
3988 (*data->each_page_callback)(page, data->data)) {
3989 break;
3990 }
3991 }
3992
3993 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3994 }
3995 }
3996
3997 return Qnil;
3998}
3999
4000/*
4001 * rb_objspace_each_objects() is a special C API for walking through the
4002 * Ruby object space.  This API is difficult to use correctly; frankly,
4003 * you should not use it unless you have read the source code of this
4004 * function and understand exactly what it does.
4005 *
4006 * 'callback' will be called several times (once per heap page in the
4007 *   current implementation) with:
4008 *   vstart: a pointer to the first living object of the heap_page.
4009 *   vend: a pointer to just past the valid heap_page area.
4010 *   stride: the distance to the next VALUE.
4011 *
4012 * If callback() returns non-zero, the iteration will be stopped.
4013 *
4014 * This is a sample callback to iterate over live objects:
4015 *
4016 *   static int
4017 *   sample_callback(void *vstart, void *vend, size_t stride, void *data)
4018 * {
4019 * VALUE v = (VALUE)vstart;
4020 * for (; v != (VALUE)vend; v += stride) {
4021 * if (!rb_objspace_internal_object_p(v)) { // liveness check
4022 * // do something with live object 'v'
4023 * }
4024 * }
4025 *     return 0; // continue the iteration
4026 * }
4027 *
4028 * Note: 'vstart' is not the top of the heap_page.  It points to the first
4029 *       living object in order to keep hold of at least one object and
4030 *       avoid GC issues.  This means you cannot walk through every Ruby
4031 *       object page, including pages of freed objects.
4032 *
4033 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
4034 *       However, variable values may be passed as 'stride' for various
4035 *       reasons, so you must use 'stride' instead of a constant value in
4036 *       the iteration.
4037 */
4038void
4039rb_objspace_each_objects(each_obj_callback *callback, void *data)
4040{
4041 objspace_each_objects(&rb_objspace, callback, data, TRUE);
4042}
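
/* A minimal invocation sketch for the API documented above (illustrative
 * only; this call site is hypothetical and not part of the original file):
 *
 *     rb_objspace_each_objects(sample_callback, NULL);
 *
 * The callback is invoked once per heap page, and returning non-zero from
 * it stops the iteration early. */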
4043
4044static void
4045objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
4046{
4047 /* Disable incremental GC */
4048 rb_objspace_t *objspace = each_obj_data->objspace;
4049 bool reenable_incremental = FALSE;
4050 if (protected) {
4051 reenable_incremental = !objspace->flags.dont_incremental;
4052
4053 gc_rest(objspace);
4054 objspace->flags.dont_incremental = TRUE;
4055 }
4056
4057 each_obj_data->reenable_incremental = reenable_incremental;
4058 memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
4059 memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
4060 rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
4061 objspace_each_objects_ensure, (VALUE)each_obj_data);
4062}
4063
4064static void
4065objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
4066{
4067 struct each_obj_data each_obj_data = {
4068 .objspace = objspace,
4069 .each_obj_callback = callback,
4070 .each_page_callback = NULL,
4071 .data = data,
4072 };
4073 objspace_each_exec(protected, &each_obj_data);
4074}
4075
4076static void
4077objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
4078{
4079 struct each_obj_data each_obj_data = {
4080 .objspace = objspace,
4081 .each_obj_callback = NULL,
4082 .each_page_callback = callback,
4083 .data = data,
4084 };
4085 objspace_each_exec(protected, &each_obj_data);
4086}
4087
4088void
4089rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4090{
4091 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4092}
4093
4094struct os_each_struct {
4095    size_t num;
4096 VALUE of;
4097};
4098
4099static int
4100internal_object_p(VALUE obj)
4101{
4102 RVALUE *p = (RVALUE *)obj;
4103 void *ptr = asan_unpoison_object_temporary(obj);
4104 bool used_p = p->as.basic.flags;
4105
4106 if (used_p) {
4107 switch (BUILTIN_TYPE(obj)) {
4108 case T_NODE:
4109 UNEXPECTED_NODE(internal_object_p);
4110 break;
4111 case T_NONE:
4112 case T_MOVED:
4113 case T_IMEMO:
4114 case T_ICLASS:
4115 case T_ZOMBIE:
4116 break;
4117 case T_CLASS:
4118 if (!p->as.basic.klass) break;
4119 if (FL_TEST(obj, FL_SINGLETON)) {
4120 return rb_singleton_class_internal_p(obj);
4121 }
4122 return 0;
4123 default:
4124 if (!p->as.basic.klass) break;
4125 return 0;
4126 }
4127 }
4128 if (ptr || ! used_p) {
4129 asan_poison_object(obj);
4130 }
4131 return 1;
4132}
4133
4134int
4135rb_objspace_internal_object_p(VALUE obj)
4136{
4137 return internal_object_p(obj);
4138}
4139
4140static int
4141os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4142{
4143 struct os_each_struct *oes = (struct os_each_struct *)data;
4144
4145 VALUE v = (VALUE)vstart;
4146 for (; v != (VALUE)vend; v += stride) {
4147 if (!internal_object_p(v)) {
4148 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
4149 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
4150 rb_yield(v);
4151 oes->num++;
4152 }
4153 }
4154 }
4155 }
4156
4157 return 0;
4158}
4159
4160static VALUE
4161os_obj_of(VALUE of)
4162{
4163 struct os_each_struct oes;
4164
4165 oes.num = 0;
4166 oes.of = of;
4167 rb_objspace_each_objects(os_obj_of_i, &oes);
4168 return SIZET2NUM(oes.num);
4169}
4170
4171/*
4172 * call-seq:
4173 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
4174 * ObjectSpace.each_object([module]) -> an_enumerator
4175 *
4176 * Calls the block once for each living, nonimmediate object in this
4177 * Ruby process. If <i>module</i> is specified, calls the block
4178 * for only those classes or modules that match (or are a subclass of)
4179 * <i>module</i>. Returns the number of objects found. Immediate
4180 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
4181 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
4182 * never returned. In the example below, #each_object returns both
4183 * the numbers we defined and several constants defined in the Math
4184 * module.
4185 *
4186 * If no block is given, an enumerator is returned instead.
4187 *
4188 * a = 102.7
4189 * b = 95 # Won't be returned
4190 * c = 12345678987654321
4191 * count = ObjectSpace.each_object(Numeric) {|x| p x }
4192 * puts "Total count: #{count}"
4193 *
4194 * <em>produces:</em>
4195 *
4196 * 12345678987654321
4197 * 102.7
4198 * 2.71828182845905
4199 * 3.14159265358979
4200 * 2.22044604925031e-16
4201 * 1.7976931348623157e+308
4202 * 2.2250738585072e-308
4203 * Total count: 7
4204 *
4205 */
4206
4207static VALUE
4208os_each_obj(int argc, VALUE *argv, VALUE os)
4209{
4210 VALUE of;
4211
4212 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
4213 RETURN_ENUMERATOR(os, 1, &of);
4214 return os_obj_of(of);
4215}
4216
4217/*
4218 * call-seq:
4219 * ObjectSpace.undefine_finalizer(obj)
4220 *
4221 * Removes all finalizers for <i>obj</i>.
4222 *
4223 */
4224
4225static VALUE
4226undefine_final(VALUE os, VALUE obj)
4227{
4228 return rb_undefine_finalizer(obj);
4229}
4230
4231VALUE
4232rb_undefine_finalizer(VALUE obj)
4233{
4234 rb_objspace_t *objspace = &rb_objspace;
4235 st_data_t data = obj;
4236 rb_check_frozen(obj);
4237 st_delete(finalizer_table, &data, 0);
4238 FL_UNSET(obj, FL_FINALIZE);
4239 return obj;
4240}
4241
4242static void
4243should_be_callable(VALUE block)
4244{
4245 if (!rb_obj_respond_to(block, idCall, TRUE)) {
4246 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
4247 rb_obj_class(block));
4248 }
4249}
4250
4251static void
4252should_be_finalizable(VALUE obj)
4253{
4254 if (!FL_ABLE(obj)) {
4255 rb_raise(rb_eArgError, "cannot define finalizer for %s",
4256 rb_obj_classname(obj));
4257 }
4258 rb_check_frozen(obj);
4259}
4260
4261VALUE
4262rb_define_finalizer_no_check(VALUE obj, VALUE block)
4263{
4264 rb_objspace_t *objspace = &rb_objspace;
4265 VALUE table;
4266 st_data_t data;
4267
4268 RBASIC(obj)->flags |= FL_FINALIZE;
4269
4270 if (st_lookup(finalizer_table, obj, &data)) {
4271 table = (VALUE)data;
4272
4273 /* avoid duplicate block, table is usually small */
4274 {
4275 long len = RARRAY_LEN(table);
4276 long i;
4277
4278 for (i = 0; i < len; i++) {
4279 VALUE recv = RARRAY_AREF(table, i);
4280 if (rb_equal(recv, block)) {
4281 block = recv;
4282 goto end;
4283 }
4284 }
4285 }
4286
4287 rb_ary_push(table, block);
4288 }
4289 else {
4290 table = rb_ary_new3(1, block);
4291 RBASIC_CLEAR_CLASS(table);
4292 st_add_direct(finalizer_table, obj, table);
4293 }
4294 end:
4295 block = rb_ary_new3(2, INT2FIX(0), block);
4296 OBJ_FREEZE(block);
4297 return block;
4298}
4299
4300/*
4301 * call-seq:
4302 * ObjectSpace.define_finalizer(obj, aProc=proc())
4303 *
4304 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
4305 * was destroyed. The object ID of the <i>obj</i> will be passed
4306 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
4307 * method, make sure it can be called with a single argument.
4308 *
4309 * The return value is an array <code>[0, aProc]</code>.
4310 *
4311 *  The two recommended patterns are to either create the finalizer proc
4312 * in a non-instance method where it can safely capture the needed state,
4313 * or to use a custom callable object that stores the needed state
4314 * explicitly as instance variables.
4315 *
4316 * class Foo
4317 * def initialize(data_needed_for_finalization)
4318 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
4319 * end
4320 *
4321 * def self.create_finalizer(data_needed_for_finalization)
4322 * proc {
4323 * puts "finalizing #{data_needed_for_finalization}"
4324 * }
4325 * end
4326 * end
4327 *
4328 * class Bar
4329 * class Remover
4330 * def initialize(data_needed_for_finalization)
4331 * @data_needed_for_finalization = data_needed_for_finalization
4332 * end
4333 *
4334 * def call(id)
4335 * puts "finalizing #{@data_needed_for_finalization}"
4336 * end
4337 * end
4338 *
4339 * def initialize(data_needed_for_finalization)
4340 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
4341 * end
4342 * end
4343 *
4344 * Note that if your finalizer references the object to be
4345 * finalized it will never be run on GC, although it will still be
4346 * run at exit. You will get a warning if you capture the object
4347 * to be finalized as the receiver of the finalizer.
4348 *
4349 * class CapturesSelf
4350 * def initialize(name)
4351 * ObjectSpace.define_finalizer(self, proc {
4352 * # this finalizer will only be run on exit
4353 * puts "finalizing #{name}"
4354 * })
4355 * end
4356 * end
4357 *
4358 * Also note that finalization can be unpredictable and is never guaranteed
4359 * to be run except on exit.
4360 */
4361
4362static VALUE
4363define_final(int argc, VALUE *argv, VALUE os)
4364{
4365 VALUE obj, block;
4366
4367 rb_scan_args(argc, argv, "11", &obj, &block);
4368 should_be_finalizable(obj);
4369 if (argc == 1) {
4370 block = rb_block_proc();
4371 }
4372 else {
4373 should_be_callable(block);
4374 }
4375
4376 if (rb_callable_receiver(block) == obj) {
4377 rb_warn("finalizer references object to be finalized");
4378 }
4379
4380 return rb_define_finalizer_no_check(obj, block);
4381}
4382
4383VALUE
4384rb_define_finalizer(VALUE obj, VALUE block)
4385{
4386 should_be_finalizable(obj);
4387 should_be_callable(block);
4388 return rb_define_finalizer_no_check(obj, block);
4389}
4390
4391void
4392rb_gc_copy_finalizer(VALUE dest, VALUE obj)
4393{
4394 rb_objspace_t *objspace = &rb_objspace;
4395 VALUE table;
4396 st_data_t data;
4397
4398 if (!FL_TEST(obj, FL_FINALIZE)) return;
4399 if (st_lookup(finalizer_table, obj, &data)) {
4400 table = (VALUE)data;
4401 st_insert(finalizer_table, dest, table);
4402 }
4403 FL_SET(dest, FL_FINALIZE);
4404}
4405
4406static VALUE
4407run_single_final(VALUE cmd, VALUE objid)
4408{
4409 return rb_check_funcall(cmd, idCall, 1, &objid);
4410}
4411
4412static void
4413warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
4414{
4415 if (!UNDEF_P(final) && !NIL_P(ruby_verbose)) {
4416 VALUE errinfo = ec->errinfo;
4417 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4418 rb_ec_error_print(ec, errinfo);
4419 }
4420}
4421
4422static void
4423run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
4424{
4425 long i;
4426 enum ruby_tag_type state;
4427 volatile struct {
4428 VALUE errinfo;
4429 VALUE objid;
4430 VALUE final;
4431 rb_control_frame_t *cfp;
4432 VALUE *sp;
4433 long finished;
4434 } saved;
4435
4436 rb_execution_context_t * volatile ec = GET_EC();
4437#define RESTORE_FINALIZER() (\
4438 ec->cfp = saved.cfp, \
4439 ec->cfp->sp = saved.sp, \
4440 ec->errinfo = saved.errinfo)
4441
4442 saved.errinfo = ec->errinfo;
4443 saved.objid = rb_obj_id(obj);
4444 saved.cfp = ec->cfp;
4445 saved.sp = ec->cfp->sp;
4446 saved.finished = 0;
4447 saved.final = Qundef;
4448
4449 EC_PUSH_TAG(ec);
4450 state = EC_EXEC_TAG();
4451 if (state != TAG_NONE) {
4452 ++saved.finished; /* skip failed finalizer */
4453 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4454 }
4455 for (i = saved.finished;
4456 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
4457 saved.finished = ++i) {
4458 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4459 }
4460 EC_POP_TAG();
4461#undef RESTORE_FINALIZER
4462}
4463
4464static void
4465run_final(rb_objspace_t *objspace, VALUE zombie)
4466{
4467 st_data_t key, table;
4468
4469 if (RZOMBIE(zombie)->dfree) {
4470 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4471 }
4472
4473 key = (st_data_t)zombie;
4474 if (st_delete(finalizer_table, &key, &table)) {
4475 run_finalizer(objspace, zombie, (VALUE)table);
4476 }
4477}
4478
4479static void
4480finalize_list(rb_objspace_t *objspace, VALUE zombie)
4481{
4482 while (zombie) {
4483 VALUE next_zombie;
4484 struct heap_page *page;
4485 asan_unpoison_object(zombie, false);
4486 next_zombie = RZOMBIE(zombie)->next;
4487 page = GET_HEAP_PAGE(zombie);
4488
4489 run_final(objspace, zombie);
4490
4491 RB_VM_LOCK_ENTER();
4492 {
4493 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
4494 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
4495 obj_free_object_id(objspace, zombie);
4496 }
4497
4498 GC_ASSERT(heap_pages_final_slots > 0);
4499 GC_ASSERT(page->final_slots > 0);
4500
4501 heap_pages_final_slots--;
4502 page->final_slots--;
4503 page->free_slots++;
4504 heap_page_add_freeobj(objspace, page, zombie);
4505 page->size_pool->total_freed_objects++;
4506 }
4507 RB_VM_LOCK_LEAVE();
4508
4509 zombie = next_zombie;
4510 }
4511}
4512
4513static void
4514finalize_deferred_heap_pages(rb_objspace_t *objspace)
4515{
4516 VALUE zombie;
4517 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4518 finalize_list(objspace, zombie);
4519 }
4520}
4521
4522static void
4523finalize_deferred(rb_objspace_t *objspace)
4524{
4525 rb_execution_context_t *ec = GET_EC();
4526 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4527 finalize_deferred_heap_pages(objspace);
4528 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4529}
4530
4531static void
4532gc_finalize_deferred(void *dmy)
4533{
4534 rb_objspace_t *objspace = dmy;
4535 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4536
4537 finalize_deferred(objspace);
4538 ATOMIC_SET(finalizing, 0);
4539}
4540
4541static void
4542gc_finalize_deferred_register(rb_objspace_t *objspace)
4543{
4544 /* will enqueue a call to gc_finalize_deferred */
4545 rb_postponed_job_trigger(objspace->finalize_deferred_pjob);
4546}
4547
4548static int pop_mark_stack(mark_stack_t *stack, VALUE *data);
4549
4550static void
4551gc_abort(rb_objspace_t *objspace)
4552{
4553 if (is_incremental_marking(objspace)) {
4554 /* Remove all objects from the mark stack. */
4555 VALUE obj;
4556 while (pop_mark_stack(&objspace->mark_stack, &obj));
4557
4558 objspace->flags.during_incremental_marking = FALSE;
4559 }
4560
4561 if (is_lazy_sweeping(objspace)) {
4562 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4563 rb_size_pool_t *size_pool = &size_pools[i];
4564 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4565
4566 heap->sweeping_page = NULL;
4567 struct heap_page *page = NULL;
4568
4569 ccan_list_for_each(&heap->pages, page, page_node) {
4570 page->flags.before_sweep = false;
4571 }
4572 }
4573 }
4574
4575 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4576 rb_size_pool_t *size_pool = &size_pools[i];
4577 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4578 rgengc_mark_and_rememberset_clear(objspace, heap);
4579 }
4580
4581 gc_mode_set(objspace, gc_mode_none);
4582}
4583
4584struct force_finalize_list {
4585    VALUE obj;
4586 VALUE table;
4587 struct force_finalize_list *next;
4588};
4589
4590static int
4591force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4592{
4593 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
4594 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
4595 curr->obj = key;
4596 curr->table = val;
4597 curr->next = *prev;
4598 *prev = curr;
4599 return ST_CONTINUE;
4600}
4601
4602bool rb_obj_is_main_ractor(VALUE gv);
4603
4604void
4605rb_objspace_free_objects(rb_objspace_t *objspace)
4606{
4607 for (size_t i = 0; i < heap_allocated_pages; i++) {
4608 struct heap_page *page = heap_pages_sorted[i];
4609 short stride = page->slot_size;
4610
4611 uintptr_t p = (uintptr_t)page->start;
4612 uintptr_t pend = p + page->total_slots * stride;
4613 for (; p < pend; p += stride) {
4614 VALUE vp = (VALUE)p;
4615 switch (BUILTIN_TYPE(vp)) {
4616 case T_DATA: {
4617 if (rb_obj_is_mutex(vp) || rb_obj_is_thread(vp) || rb_obj_is_main_ractor(vp)) {
4618 obj_free(objspace, vp);
4619 }
4620 break;
4621 }
4622 case T_ARRAY:
4623 obj_free(objspace, vp);
4624 break;
4625 default:
4626 break;
4627 }
4628 }
4629 }
4630}
4631
4632
4633void
4634rb_objspace_call_finalizer(rb_objspace_t *objspace)
4635{
4636 size_t i;
4637
4638#if RGENGC_CHECK_MODE >= 2
4639 gc_verify_internal_consistency(objspace);
4640#endif
4641 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4642
4643 /* run finalizers */
4644 finalize_deferred(objspace);
4645 GC_ASSERT(heap_pages_deferred_final == 0);
4646
4647 /* prohibit incremental GC */
4648 objspace->flags.dont_incremental = 1;
4649
4650    /* force finalizers to run */
4651 while (finalizer_table->num_entries) {
4652 struct force_finalize_list *list = 0;
4653 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4654 while (list) {
4655 struct force_finalize_list *curr = list;
4656 st_data_t obj = (st_data_t)curr->obj;
4657 run_finalizer(objspace, curr->obj, curr->table);
4658 st_delete(finalizer_table, &obj, 0);
4659 list = curr->next;
4660 xfree(curr);
4661 }
4662 }
4663
4664 /* Abort incremental marking and lazy sweeping to speed up shutdown. */
4665 gc_abort(objspace);
4666
4667    /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
4668 dont_gc_on();
4669
4670 /* running data/file finalizers are part of garbage collection */
4671 unsigned int lock_lev;
4672 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4673
4674 /* run data/file object's finalizers */
4675 for (i = 0; i < heap_allocated_pages; i++) {
4676 struct heap_page *page = heap_pages_sorted[i];
4677 short stride = page->slot_size;
4678
4679 uintptr_t p = (uintptr_t)page->start;
4680 uintptr_t pend = p + page->total_slots * stride;
4681 for (; p < pend; p += stride) {
4682 VALUE vp = (VALUE)p;
4683 void *poisoned = asan_unpoison_object_temporary(vp);
4684 switch (BUILTIN_TYPE(vp)) {
4685 case T_DATA:
4686 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4687 if (rb_obj_is_thread(vp)) break;
4688 if (rb_obj_is_mutex(vp)) break;
4689 if (rb_obj_is_fiber(vp)) break;
4690 if (rb_obj_is_main_ractor(vp)) break;
4691
4692 obj_free(objspace, vp);
4693 break;
4694 case T_FILE:
4695 obj_free(objspace, vp);
4696 break;
4697 case T_SYMBOL:
4698 case T_ARRAY:
4699 case T_NONE:
4700 break;
4701 default:
4702 if (rb_free_at_exit) {
4703 obj_free(objspace, vp);
4704 }
4705 break;
4706 }
4707 if (poisoned) {
4708 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4709 asan_poison_object(vp);
4710 }
4711 }
4712 }
4713
4714 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4715
4716 finalize_deferred_heap_pages(objspace);
4717
4718 st_free_table(finalizer_table);
4719 finalizer_table = 0;
4720 ATOMIC_SET(finalizing, 0);
4721}
4722
4723static inline int
4724is_swept_object(VALUE ptr)
4725{
4726 struct heap_page *page = GET_HEAP_PAGE(ptr);
4727 return page->flags.before_sweep ? FALSE : TRUE;
4728}
4729
4730/* garbage objects will be collected soon. */
4731static inline int
4732is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
4733{
4734 if (!is_lazy_sweeping(objspace) ||
4735 is_swept_object(ptr) ||
4736 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4737
4738 return FALSE;
4739 }
4740 else {
4741 return TRUE;
4742 }
4743}
4744
4745static inline int
4746is_live_object(rb_objspace_t *objspace, VALUE ptr)
4747{
4748 switch (BUILTIN_TYPE(ptr)) {
4749 case T_NONE:
4750 case T_MOVED:
4751 case T_ZOMBIE:
4752 return FALSE;
4753 default:
4754 break;
4755 }
4756
4757 if (!is_garbage_object(objspace, ptr)) {
4758 return TRUE;
4759 }
4760 else {
4761 return FALSE;
4762 }
4763}
4764
4765static inline int
4766is_markable_object(VALUE obj)
4767{
4768 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
4769 check_rvalue_consistency(obj);
4770 return TRUE;
4771}
4772
4773int
4774rb_objspace_markable_object_p(VALUE obj)
4775{
4776 rb_objspace_t *objspace = &rb_objspace;
4777 return is_markable_object(obj) && is_live_object(objspace, obj);
4778}
4779
4780int
4781rb_objspace_garbage_object_p(VALUE obj)
4782{
4783 rb_objspace_t *objspace = &rb_objspace;
4784 return is_garbage_object(objspace, obj);
4785}
4786
4787bool
4788rb_gc_is_ptr_to_obj(void *ptr)
4789{
4790 rb_objspace_t *objspace = &rb_objspace;
4791 return is_pointer_to_heap(objspace, ptr);
4792}
4793
4794VALUE
4795rb_gc_id2ref_obj_tbl(VALUE objid)
4796{
4797 rb_objspace_t *objspace = &rb_objspace;
4798
4799 VALUE orig;
4800 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4801 return orig;
4802 }
4803 else {
4804 return Qundef;
4805 }
4806}
4807
4808/*
4809 * call-seq:
4810 * ObjectSpace._id2ref(object_id) -> an_object
4811 *
4812 * Converts an object id to a reference to the object. May not be
4813 * called on an object id passed as a parameter to a finalizer.
4814 *
4815 * s = "I am a string" #=> "I am a string"
4816 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
4817 * r == s #=> true
4818 *
4819 *  In multi-ractor mode, if the object is not shareable, it raises
4820 * RangeError.
4821 */
4822
4823static VALUE
4824id2ref(VALUE objid)
4825{
4826#if SIZEOF_LONG == SIZEOF_VOIDP
4827#define NUM2PTR(x) NUM2ULONG(x)
4828#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4829#define NUM2PTR(x) NUM2ULL(x)
4830#endif
4831 rb_objspace_t *objspace = &rb_objspace;
4832 VALUE ptr;
4833 VALUE orig;
4834 void *p0;
4835
4836 objid = rb_to_int(objid);
4837 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4838 ptr = NUM2PTR(objid);
4839 if (ptr == Qtrue) return Qtrue;
4840 if (ptr == Qfalse) return Qfalse;
4841 if (NIL_P(ptr)) return Qnil;
4842 if (FIXNUM_P(ptr)) return (VALUE)ptr;
4843 if (FLONUM_P(ptr)) return (VALUE)ptr;
4844
4845 ptr = obj_id_to_ref(objid);
4846 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4847 ID symid = ptr / sizeof(RVALUE);
4848 p0 = (void *)ptr;
4849 if (!rb_static_id_valid_p(symid))
4850 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
4851 return ID2SYM(symid);
4852 }
4853 }
4854
4855 if (!UNDEF_P(orig = rb_gc_id2ref_obj_tbl(objid)) &&
4856 is_live_object(objspace, orig)) {
4857
4858 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4859 return orig;
4860 }
4861 else {
4862 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4863 }
4864 }
4865
4866 if (rb_int_ge(objid, objspace->next_object_id)) {
4867 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4868 }
4869 else {
4870 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4871 }
4872}
4873
4874/* :nodoc: */
4875static VALUE
4876os_id2ref(VALUE os, VALUE objid)
4877{
4878 return id2ref(objid);
4879}
4880
4881static VALUE
4882rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4883{
4884 if (STATIC_SYM_P(obj)) {
4885 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4886 }
4887 else if (FLONUM_P(obj)) {
4888#if SIZEOF_LONG == SIZEOF_VOIDP
4889 return LONG2NUM((SIGNED_VALUE)obj);
4890#else
4891 return LL2NUM((SIGNED_VALUE)obj);
4892#endif
4893 }
4894 else if (SPECIAL_CONST_P(obj)) {
4895 return LONG2NUM((SIGNED_VALUE)obj);
4896 }
4897
4898 return get_heap_object_id(obj);
4899}
4900
4901static VALUE
4902cached_object_id(VALUE obj)
4903{
4904 VALUE id;
4905 rb_objspace_t *objspace = &rb_objspace;
4906
4907 RB_VM_LOCK_ENTER();
4908 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4909 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4910 }
4911 else {
4912 GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4913
4914 id = objspace->next_object_id;
4915 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4916
4917 VALUE already_disabled = rb_gc_disable_no_rest();
4918 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4919 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4920 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4921 FL_SET(obj, FL_SEEN_OBJ_ID);
4922 }
4923 RB_VM_LOCK_LEAVE();
4924
4925 return id;
4926}
4927
4928static VALUE
4929nonspecial_obj_id(VALUE obj)
4930{
4931#if SIZEOF_LONG == SIZEOF_VOIDP
4932 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
4933#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4934 return LL2NUM((SIGNED_VALUE)(obj) / 2);
4935#else
4936# error not supported
4937#endif
4938}
4939
4940VALUE
4941rb_memory_id(VALUE obj)
4942{
4943 return rb_find_object_id(obj, nonspecial_obj_id);
4944}
4945
4946/*
4947 * Document-method: __id__
4948 * Document-method: object_id
4949 *
4950 * call-seq:
4951 * obj.__id__ -> integer
4952 * obj.object_id -> integer
4953 *
4954 * Returns an integer identifier for +obj+.
4955 *
4956 * The same number will be returned on all calls to +object_id+ for a given
4957 * object, and no two active objects will share an id.
4958 *
4959 *  Note that some objects of builtin classes are reused for optimization.
4960 * This is the case for immediate values and frozen string literals.
4961 *
4962 * BasicObject implements +__id__+, Kernel implements +object_id+.
4963 *
4964 * Immediate values are not passed by reference but are passed by value:
4965 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4966 *
4967 * Object.new.object_id == Object.new.object_id # => false
4968 * (21 * 2).object_id == (21 * 2).object_id # => true
4969 * "hello".object_id == "hello".object_id # => false
4970 * "hi".freeze.object_id == "hi".freeze.object_id # => true
4971 */
4972
4973VALUE
4974rb_obj_id(VALUE obj)
4975{
4976 /*
4977 * 32-bit VALUE space
4978 * MSB ------------------------ LSB
4979 * false 00000000000000000000000000000000
4980 * true 00000000000000000000000000000010
4981 * nil 00000000000000000000000000000100
4982 * undef 00000000000000000000000000000110
4983 * symbol ssssssssssssssssssssssss00001110
4984 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4985 * fixnum fffffffffffffffffffffffffffffff1
4986 *
4987 * object_id space
4988 * LSB
4989 * false 00000000000000000000000000000000
4990 * true 00000000000000000000000000000010
4991 * nil 00000000000000000000000000000100
4992 * undef 00000000000000000000000000000110
4993 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4994 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4995 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4996 *
4997 * where A = sizeof(RVALUE)/4
4998 *
4999 * sizeof(RVALUE) is
5000 * 20 if 32-bit, double is 4-byte aligned
5001 * 24 if 32-bit, double is 8-byte aligned
5002 * 40 if 64-bit
5003 */
5004
5005 return rb_find_object_id(obj, cached_object_id);
5006}
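
/* A worked example of the encoding above (an added note, illustrative only,
 * assuming a 64-bit build where sizeof(RVALUE) == 40): a static symbol whose
 * ID is 1000 is given the object id 1000 * 40 + (4 << 2) == 40016.  Because
 * 40016 % 40 == 16, id2ref() recognizes it as a symbol id and recovers
 * ID2SYM(40016 / 40) == ID2SYM(1000).  Heap objects instead receive ids from
 * cached_object_id() in steps of OBJ_ID_INCREMENT (20 here), which are never
 * congruent to 16 modulo 40, so the two encodings cannot collide. */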
5007
5008static enum rb_id_table_iterator_result
5009cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
5010{
5011 size_t *total_size = data_ptr;
5012 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
5013 *total_size += sizeof(*ccs);
5014 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
5015 return ID_TABLE_CONTINUE;
5016}
5017
5018static size_t
5019cc_table_memsize(struct rb_id_table *cc_table)
5020{
5021 size_t total = rb_id_table_memsize(cc_table);
5022 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
5023 return total;
5024}
5025
5026static size_t
5027obj_memsize_of(VALUE obj, int use_all_types)
5028{
5029 size_t size = 0;
5030
5031 if (SPECIAL_CONST_P(obj)) {
5032 return 0;
5033 }
5034
5035 if (FL_TEST(obj, FL_EXIVAR)) {
5036 size += rb_generic_ivar_memsize(obj);
5037 }
5038
5039 switch (BUILTIN_TYPE(obj)) {
5040 case T_OBJECT:
5041 if (rb_shape_obj_too_complex(obj)) {
5042 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
5043 }
5044 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
5045 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
5046 }
5047 break;
5048 case T_MODULE:
5049 case T_CLASS:
5050 if (RCLASS_M_TBL(obj)) {
5051 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5052 }
5053 // class IV sizes are allocated as powers of two
5054 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
5055 if (RCLASS_CVC_TBL(obj)) {
5056 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
5057 }
5058 if (RCLASS_EXT(obj)->const_tbl) {
5059 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
5060 }
5061 if (RCLASS_CC_TBL(obj)) {
5062 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5063 }
5064 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
5065 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
5066 }
5067 break;
5068 case T_ICLASS:
5069 if (RICLASS_OWNS_M_TBL_P(obj)) {
5070 if (RCLASS_M_TBL(obj)) {
5071 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5072 }
5073 }
5074 if (RCLASS_CC_TBL(obj)) {
5075 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5076 }
5077 break;
5078 case T_STRING:
5079 size += rb_str_memsize(obj);
5080 break;
5081 case T_ARRAY:
5082 size += rb_ary_memsize(obj);
5083 break;
5084 case T_HASH:
5085 if (RHASH_ST_TABLE_P(obj)) {
5086 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
5087 /* st_table is in the slot */
5088 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
5089 }
5090 break;
5091 case T_REGEXP:
5092 if (RREGEXP_PTR(obj)) {
5093 size += onig_memsize(RREGEXP_PTR(obj));
5094 }
5095 break;
5096 case T_DATA:
5097 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
5098 break;
5099 case T_MATCH:
5100 {
5101 rb_matchext_t *rm = RMATCH_EXT(obj);
5102 size += onig_region_memsize(&rm->regs);
5103 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
5104 }
5105 break;
5106 case T_FILE:
5107 if (RFILE(obj)->fptr) {
5108 size += rb_io_memsize(RFILE(obj)->fptr);
5109 }
5110 break;
5111 case T_RATIONAL:
5112 case T_COMPLEX:
5113 break;
5114 case T_IMEMO:
5115 size += imemo_memsize(obj);
5116 break;
5117
5118 case T_FLOAT:
5119 case T_SYMBOL:
5120 break;
5121
5122 case T_BIGNUM:
5123 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
5124 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
5125 }
5126 break;
5127
5128 case T_NODE:
5129 UNEXPECTED_NODE(obj_memsize_of);
5130 break;
5131
5132 case T_STRUCT:
5133 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
5134 RSTRUCT(obj)->as.heap.ptr) {
5135 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
5136 }
5137 break;
5138
5139 case T_ZOMBIE:
5140 case T_MOVED:
5141 break;
5142
5143 default:
5144 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5145 BUILTIN_TYPE(obj), (void*)obj);
5146 }
5147
5148 return size + rb_gc_obj_slot_size(obj);
5149}
5150
5151size_t
5152rb_obj_memsize_of(VALUE obj)
5153{
5154 return obj_memsize_of(obj, TRUE);
5155}
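
/* Illustrative example (an added note, not from the original source): a
 * non-embedded T_BIGNUM whose BIGNUM_LEN is 4 reports
 * rb_gc_obj_slot_size(obj) + 4 * sizeof(BDIGIT) bytes, i.e. the slot itself
 * plus the out-of-line digit buffer counted in obj_memsize_of() above. */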
5156
5157static int
5158set_zero(st_data_t key, st_data_t val, st_data_t arg)
5159{
5160 VALUE k = (VALUE)key;
5161 VALUE hash = (VALUE)arg;
5162 rb_hash_aset(hash, k, INT2FIX(0));
5163 return ST_CONTINUE;
5164}
5165
5166static VALUE
5167type_sym(size_t type)
5168{
5169 switch (type) {
5170#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5171 COUNT_TYPE(T_NONE);
5172 COUNT_TYPE(T_OBJECT);
5173 COUNT_TYPE(T_CLASS);
5174 COUNT_TYPE(T_MODULE);
5175 COUNT_TYPE(T_FLOAT);
5176 COUNT_TYPE(T_STRING);
5177 COUNT_TYPE(T_REGEXP);
5178 COUNT_TYPE(T_ARRAY);
5179 COUNT_TYPE(T_HASH);
5180 COUNT_TYPE(T_STRUCT);
5181 COUNT_TYPE(T_BIGNUM);
5182 COUNT_TYPE(T_FILE);
5183 COUNT_TYPE(T_DATA);
5184 COUNT_TYPE(T_MATCH);
5185 COUNT_TYPE(T_COMPLEX);
5186 COUNT_TYPE(T_RATIONAL);
5187 COUNT_TYPE(T_NIL);
5188 COUNT_TYPE(T_TRUE);
5189 COUNT_TYPE(T_FALSE);
5190 COUNT_TYPE(T_SYMBOL);
5191 COUNT_TYPE(T_FIXNUM);
5192 COUNT_TYPE(T_IMEMO);
5193 COUNT_TYPE(T_UNDEF);
5194 COUNT_TYPE(T_NODE);
5195 COUNT_TYPE(T_ICLASS);
5196 COUNT_TYPE(T_ZOMBIE);
5197 COUNT_TYPE(T_MOVED);
5198#undef COUNT_TYPE
5199 default: return SIZET2NUM(type); break;
5200 }
5201}
5202
5203/*
5204 * call-seq:
5205 * ObjectSpace.count_objects([result_hash]) -> hash
5206 *
5207 * Counts all objects grouped by type.
5208 *
5209 * It returns a hash, such as:
5210 * {
5211 * :TOTAL=>10000,
5212 * :FREE=>3011,
5213 * :T_OBJECT=>6,
5214 * :T_CLASS=>404,
5215 * # ...
5216 * }
5217 *
5218 *  The contents of the returned hash are implementation specific.
5219 *  They may change in the future.
5220 *
5221 *  The keys starting with +:T_+ denote live objects.
5222 *  For example, +:T_ARRAY+ is the number of arrays.
5223 *  +:FREE+ is the number of object slots which are not currently in use.
5224 *  +:TOTAL+ is the sum of the above.
5225 *
5226 * If the optional argument +result_hash+ is given,
5227 * it is overwritten and returned. This is intended to avoid probe effect.
5228 *
5229 * h = {}
5230 * ObjectSpace.count_objects(h)
5231 * puts h
5232 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
5233 *
5234 * This method is only expected to work on C Ruby.
5235 *
5236 */
5237
5238static VALUE
5239count_objects(int argc, VALUE *argv, VALUE os)
5240{
5241 rb_objspace_t *objspace = &rb_objspace;
5242 size_t counts[T_MASK+1];
5243 size_t freed = 0;
5244 size_t total = 0;
5245 size_t i;
5246 VALUE hash = Qnil;
5247
5248 if (rb_check_arity(argc, 0, 1) == 1) {
5249 hash = argv[0];
5250 if (!RB_TYPE_P(hash, T_HASH))
5251 rb_raise(rb_eTypeError, "non-hash given");
5252 }
5253
5254 for (i = 0; i <= T_MASK; i++) {
5255 counts[i] = 0;
5256 }
5257
5258 for (i = 0; i < heap_allocated_pages; i++) {
5259 struct heap_page *page = heap_pages_sorted[i];
5260 short stride = page->slot_size;
5261
5262 uintptr_t p = (uintptr_t)page->start;
5263 uintptr_t pend = p + page->total_slots * stride;
5264 for (;p < pend; p += stride) {
5265 VALUE vp = (VALUE)p;
5266 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5267
5268 void *poisoned = asan_unpoison_object_temporary(vp);
5269 if (RANY(p)->as.basic.flags) {
5270 counts[BUILTIN_TYPE(vp)]++;
5271 }
5272 else {
5273 freed++;
5274 }
5275 if (poisoned) {
5276 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
5277 asan_poison_object(vp);
5278 }
5279 }
5280 total += page->total_slots;
5281 }
5282
5283 if (NIL_P(hash)) {
5284 hash = rb_hash_new();
5285 }
5286 else if (!RHASH_EMPTY_P(hash)) {
5287 rb_hash_stlike_foreach(hash, set_zero, hash);
5288 }
5289 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
5290 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
5291
5292 for (i = 0; i <= T_MASK; i++) {
5293 VALUE type = type_sym(i);
5294 if (counts[i])
5295 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
5296 }
5297
5298 return hash;
5299}
5300
5301/*
5302 ------------------------ Garbage Collection ------------------------
5303*/
5304
5305/* Sweeping */
5306
5307static size_t
5308objspace_available_slots(rb_objspace_t *objspace)
5309{
5310 size_t total_slots = 0;
5311 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5312 rb_size_pool_t *size_pool = &size_pools[i];
5313 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5314 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5315 }
5316 return total_slots;
5317}
5318
5319static size_t
5320objspace_live_slots(rb_objspace_t *objspace)
5321{
5322 return total_allocated_objects(objspace) - total_freed_objects(objspace) - heap_pages_final_slots;
5323}
5324
5325static size_t
5326objspace_free_slots(rb_objspace_t *objspace)
5327{
5328 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5329}
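
/* Illustrative numbers only (an added note): if the size pools together hold
 * 1000 slots, 600 objects have ever been allocated, 200 of them have been
 * freed, and 10 zombies still await finalization, then
 * objspace_live_slots() == 600 - 200 - 10 == 390 and
 * objspace_free_slots() == 1000 - 390 - 10 == 600. */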
5330
5331static void
5332gc_setup_mark_bits(struct heap_page *page)
5333{
5334 /* copy oldgen bitmap to mark bitmap */
5335 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5336}
5337
5338static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
5339static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
5340
5341#if defined(_WIN32)
5342enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5343
5344static BOOL
5345protect_page_body(struct heap_page_body *body, DWORD protect)
5346{
5347 DWORD old_protect;
5348 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5349}
5350#else
5351enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5352#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
5353#endif
5354
5355static void
5356lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5357{
5358 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5359 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5360 }
5361 else {
5362 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5363 }
5364}
5365
5366static void
5367unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5368{
5369 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5370 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5371 }
5372 else {
5373 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5374 }
5375}
5376
5377static bool
5378try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
5379{
5380 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5381
5382 struct heap_page *src_page = GET_HEAP_PAGE(src);
5383 if (!free_page) {
5384 return false;
5385 }
5386
5387 /* We should return true if either src is successfully moved, or src is
5388 * unmoveable. A false return will cause the sweeping cursor to be
5389 * incremented to the next page, and src will attempt to move again */
5390 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5391
5392 asan_unlock_freelist(free_page);
5393 VALUE dest = (VALUE)free_page->freelist;
5394 asan_lock_freelist(free_page);
5395 asan_unpoison_object(dest, false);
5396 if (!dest) {
5397 /* if we can't get something from the freelist then the page must be
5398 * full */
5399 return false;
5400 }
5401 asan_unlock_freelist(free_page);
5402 free_page->freelist = RANY(dest)->as.free.next;
5403 asan_lock_freelist(free_page);
5404
5405 GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
5406
5407 if (src_page->slot_size > free_page->slot_size) {
5408 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5409 }
5410 else if (free_page->slot_size > src_page->slot_size) {
5411 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5412 }
5413 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5414 objspace->rcompactor.total_moved++;
5415
5416 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5417 gc_pin(objspace, src);
5418 free_page->free_slots--;
5419
5420 return true;
5421}
5422
5423static void
5424gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
5425{
5426 struct heap_page *cursor = heap->compact_cursor;
5427
5428 while (cursor) {
5429 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5430 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5431 }
5432}
5433
5434static void gc_update_references(rb_objspace_t * objspace);
5435#if GC_CAN_COMPILE_COMPACTION
5436static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
5437#endif
5438
5439#if defined(__MINGW32__) || defined(_WIN32)
5440# define GC_COMPACTION_SUPPORTED 1
5441#else
5442/* If we are not on MinGW or Windows and do not have mmap, we cannot use
5443 * mprotect for the read barrier, so we must disable compaction. */
5444# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5445#endif
5446
5447#if GC_CAN_COMPILE_COMPACTION
5448static void
5449read_barrier_handler(uintptr_t original_address)
5450{
5451 VALUE obj;
5452 rb_objspace_t * objspace = &rb_objspace;
5453
5454 /* Calculate address aligned to slots. */
5455 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5456
5457 obj = (VALUE)address;
5458
5459 struct heap_page_body *page_body = GET_PAGE_BODY(obj);
5460
5461 /* If the page_body is NULL, then mprotect cannot handle it and will crash
5462 * with "Cannot allocate memory". */
5463 if (page_body == NULL) {
5464 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5465 }
5466
5467 RB_VM_LOCK_ENTER();
5468 {
5469 unlock_page_body(objspace, page_body);
5470
5471 objspace->profile.read_barrier_faults++;
5472
5473 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5474 }
5475 RB_VM_LOCK_LEAVE();
5476}
5477#endif
5478
5479#if !GC_CAN_COMPILE_COMPACTION
5480static void
5481uninstall_handlers(void)
5482{
5483 /* no-op */
5484}
5485
5486static void
5487install_handlers(void)
5488{
5489 /* no-op */
5490}
5491#elif defined(_WIN32)
5492static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5493typedef void (*signal_handler)(int);
5494static signal_handler old_sigsegv_handler;
5495
5496static LONG WINAPI
5497read_barrier_signal(EXCEPTION_POINTERS * info)
5498{
5499 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5500 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5501 /* > The second array element specifies the virtual address of the inaccessible data.
5502 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5503 *
5504 * Use this address to invalidate the page */
5505 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5506 return EXCEPTION_CONTINUE_EXECUTION;
5507 }
5508 else {
5509 return EXCEPTION_CONTINUE_SEARCH;
5510 }
5511}
5512
5513static void
5514uninstall_handlers(void)
5515{
5516 signal(SIGSEGV, old_sigsegv_handler);
5517 SetUnhandledExceptionFilter(old_handler);
5518}
5519
5520static void
5521install_handlers(void)
5522{
5523 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5524 old_sigsegv_handler = signal(SIGSEGV, NULL);
5525 /* Unhandled Exception Filter has access to the violation address similar
5526 * to si_addr from sigaction */
5527 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5528}
5529#else
5530static struct sigaction old_sigbus_handler;
5531static struct sigaction old_sigsegv_handler;
5532
5533#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5534static exception_mask_t old_exception_masks[32];
5535static mach_port_t old_exception_ports[32];
5536static exception_behavior_t old_exception_behaviors[32];
5537static thread_state_flavor_t old_exception_flavors[32];
5538static mach_msg_type_number_t old_exception_count;
5539
5540static void
5541disable_mach_bad_access_exc(void)
5542{
5543 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5544 task_swap_exception_ports(
5545 mach_task_self(), EXC_MASK_BAD_ACCESS,
5546 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5547 old_exception_masks, &old_exception_count,
5548 old_exception_ports, old_exception_behaviors, old_exception_flavors
5549 );
5550}
5551
5552static void
5553restore_mach_bad_access_exc(void)
5554{
5555 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5556 task_set_exception_ports(
5557 mach_task_self(),
5558 old_exception_masks[i], old_exception_ports[i],
5559 old_exception_behaviors[i], old_exception_flavors[i]
5560 );
5561 }
5562}
5563#endif
5564
5565static void
5566read_barrier_signal(int sig, siginfo_t * info, void * data)
5567{
5568 // setup SEGV/BUS handlers for errors
5569 struct sigaction prev_sigbus, prev_sigsegv;
5570 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5571 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5572
5573 // enable SIGBUS/SEGV
5574 sigset_t set, prev_set;
5575 sigemptyset(&set);
5576 sigaddset(&set, SIGBUS);
5577 sigaddset(&set, SIGSEGV);
5578 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5579#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5580 disable_mach_bad_access_exc();
5581#endif
5582 // run handler
5583 read_barrier_handler((uintptr_t)info->si_addr);
5584
5585 // reset SEGV/BUS handlers
5586#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5587 restore_mach_bad_access_exc();
5588#endif
5589 sigaction(SIGBUS, &prev_sigbus, NULL);
5590 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5591 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5592}
5593
5594static void
5595uninstall_handlers(void)
5596{
5597#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5598 restore_mach_bad_access_exc();
5599#endif
5600 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5601 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5602}
5603
5604static void
5605install_handlers(void)
5606{
5607 struct sigaction action;
5608 memset(&action, 0, sizeof(struct sigaction));
5609 sigemptyset(&action.sa_mask);
5610 action.sa_sigaction = read_barrier_signal;
5611 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5612
5613 sigaction(SIGBUS, &action, &old_sigbus_handler);
5614 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5615#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5616 disable_mach_bad_access_exc();
5617#endif
5618}
5619#endif
5620
5621static void
5622gc_compact_finish(rb_objspace_t *objspace)
5623{
5624 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5625 rb_size_pool_t *size_pool = &size_pools[i];
5626 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5627 gc_unprotect_pages(objspace, heap);
5628 }
5629
5630 uninstall_handlers();
5631
5632 gc_update_references(objspace);
5633 objspace->profile.compact_count++;
5634
5635 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5636 rb_size_pool_t *size_pool = &size_pools[i];
5637 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5638 heap->compact_cursor = NULL;
5639 heap->free_pages = NULL;
5640 heap->compact_cursor_index = 0;
5641 }
5642
5643 if (gc_prof_enabled(objspace)) {
5644 gc_profile_record *record = gc_prof_record(objspace);
5645 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5646 }
5647 objspace->flags.during_compacting = FALSE;
5648}
5649
5650struct gc_sweep_context {
5651    struct heap_page *page;
5652 int final_slots;
5653 int freed_slots;
5654 int empty_slots;
5655};
5656
5657static inline void
5658gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
5659{
5660 struct heap_page * sweep_page = ctx->page;
5661 short slot_size = sweep_page->slot_size;
5662 short slot_bits = slot_size / BASE_SLOT_SIZE;
5663 GC_ASSERT(slot_bits > 0);
5664
5665 do {
5666 VALUE vp = (VALUE)p;
5667 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5668
5669 asan_unpoison_object(vp, false);
5670 if (bitset & 1) {
5671 switch (BUILTIN_TYPE(vp)) {
5672 default: /* majority case */
5673 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5674#if RGENGC_CHECK_MODE
5675 if (!is_full_marking(objspace)) {
5676 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5677 if (RVALUE_REMEMBERED(vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5678 }
5679#endif
5680 if (obj_free(objspace, vp)) {
5681                    // always add free slots back to the swept page's freelist,
5682                    // so that if we're compacting, we can re-use the slots
5683 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5684 heap_page_add_freeobj(objspace, sweep_page, vp);
5685 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5686 ctx->freed_slots++;
5687 }
5688 else {
5689 ctx->final_slots++;
5690 }
5691 break;
5692
5693 case T_MOVED:
5694 if (objspace->flags.during_compacting) {
5695 /* The sweep cursor shouldn't have made it to any
5696 * T_MOVED slots while the compact flag is enabled.
5697 * The sweep cursor and compact cursor move in
5698 * opposite directions, and when they meet references will
5699 * get updated and "during_compacting" should get disabled */
5700 rb_bug("T_MOVED shouldn't be seen until compaction is finished");
5701 }
5702 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5703 ctx->empty_slots++;
5704 heap_page_add_freeobj(objspace, sweep_page, vp);
5705 break;
5706 case T_ZOMBIE:
5707 /* already counted */
5708 break;
5709 case T_NONE:
5710 ctx->empty_slots++; /* already freed */
5711 break;
5712 }
5713 }
5714 p += slot_size;
5715 bitset >>= slot_bits;
5716 } while (bitset);
5717}
5718
5719static inline void
5720gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
5721{
5722 struct heap_page *sweep_page = ctx->page;
5723 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5724
5725 uintptr_t p;
5726 bits_t *bits, bitset;
5727
5728 gc_report(2, objspace, "page_sweep: start.\n");
5729
5730#if RGENGC_CHECK_MODE
5731 if (!objspace->flags.immediate_sweep) {
5732 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5733 }
5734#endif
5735 sweep_page->flags.before_sweep = FALSE;
5736 sweep_page->free_slots = 0;
5737
5738 p = (uintptr_t)sweep_page->start;
5739 bits = sweep_page->mark_bits;
5740
5741 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5742 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5743 if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
5744 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5745 }
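    /* Worked example (illustrative numbers only, an added note): with
     * BITS_BITLENGTH == 64, NUM_IN_PAGE(p) == 2 and page_rvalue_count == 408,
     * the last slot sits at bit position 409 and (2 + 408) % 64 == 26, so
     * plane 0 + 408/64 == 6 gets its bits 26..63 forced on; the inverted
     * bitsets scanned below then never treat the nonexistent trailing slots
     * as free. */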
5746
5747 /* The last bitmap plane may not be used if the last plane does not
5748 * have enough space for the slot_size. In that case, the last plane must
5749 * be skipped since none of the bits will be set. */
5750 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5751 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5752 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5753
5754 // Skip out of range slots at the head of the page
5755 bitset = ~bits[0];
5756 bitset >>= NUM_IN_PAGE(p);
5757 if (bitset) {
5758 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5759 }
5760 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5761
5762 for (int i = 1; i < bitmap_plane_count; i++) {
5763 bitset = ~bits[i];
5764 if (bitset) {
5765 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5766 }
5767 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5768 }
5769
5770 if (!heap->compact_cursor) {
5771 gc_setup_mark_bits(sweep_page);
5772 }
5773
5774#if GC_PROFILE_MORE_DETAIL
5775 if (gc_prof_enabled(objspace)) {
5776 gc_profile_record *record = gc_prof_record(objspace);
5777 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5778 record->empty_objects += ctx->empty_slots;
5779 }
5780#endif
5781 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5782 rb_gc_count(),
5783 sweep_page->total_slots,
5784 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5785
5786 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5787 sweep_page->size_pool->total_freed_objects += ctx->freed_slots;
5788
5789 if (heap_pages_deferred_final && !finalizing) {
5790 rb_thread_t *th = GET_THREAD();
5791 if (th) {
5792 gc_finalize_deferred_register(objspace);
5793 }
5794 }
5795
5796#if RGENGC_CHECK_MODE
5797 short freelist_len = 0;
5798 asan_unlock_freelist(sweep_page);
5799 RVALUE *ptr = sweep_page->freelist;
5800 while (ptr) {
5801 freelist_len++;
5802 ptr = ptr->as.free.next;
5803 }
5804 asan_lock_freelist(sweep_page);
5805 if (freelist_len != sweep_page->free_slots) {
5806 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5807 }
5808#endif
5809
5810 gc_report(2, objspace, "page_sweep: end.\n");
5811}
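
/* -- Editorial sketch, not part of gc.c: how gc_sweep_page() above pads the
 * tail of the mark bitmap.  When a page's slots do not fill the last bitmap
 * plane, the leftover high bits are forced to 1, so the inverted plane handed
 * to gc_sweep_plane() has zeros there and the out-of-range positions are never
 * treated as free slots.  The 64-bit plane width and the numbers are assumed. */
#if 0 /* illustrative example only */
#include <stdint.h>
#include <assert.h>

int
main(void)
{
    uint64_t plane = 0;        /* last plane of the mark bitmap (nothing marked)        */
    int used_bits  = 10;       /* e.g. (NUM_IN_PAGE(p) + page_rvalue_count) % 64 == 10  */

    /* Same expression as in gc_sweep_page(): set every bit past the valid range. */
    plane |= ~(((uint64_t)1 << used_bits) - 1);

    /* After inversion (as the sweep does), only in-range bits can be set. */
    assert((~plane >> used_bits) == 0);
    return 0;
}
#endif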
5812
5813static const char *
5814gc_mode_name(enum gc_mode mode)
5815{
5816 switch (mode) {
5817 case gc_mode_none: return "none";
5818 case gc_mode_marking: return "marking";
5819 case gc_mode_sweeping: return "sweeping";
5820 case gc_mode_compacting: return "compacting";
5821 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5822 }
5823}
5824
5825static void
5826gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5827{
5828#if RGENGC_CHECK_MODE
5829 enum gc_mode prev_mode = gc_mode(objspace);
5830 switch (prev_mode) {
5831 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5832 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5833 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5834 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5835 }
5836#endif
5837 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5838 gc_mode_set(objspace, mode);
5839}
5840
5841static void
5842heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
5843{
5844 if (freelist) {
5845 asan_unlock_freelist(page);
5846 if (page->freelist) {
5847 RVALUE *p = page->freelist;
5848 asan_unpoison_object((VALUE)p, false);
5849 while (p->as.free.next) {
5850 RVALUE *prev = p;
5851 p = p->as.free.next;
5852 asan_poison_object((VALUE)prev);
5853 asan_unpoison_object((VALUE)p, false);
5854 }
5855 p->as.free.next = freelist;
5856 asan_poison_object((VALUE)p);
5857 }
5858 else {
5859 page->freelist = freelist;
5860 }
5861 asan_lock_freelist(page);
5862 }
5863}
5864
5865static void
5866gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5867{
5868 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5869 heap->free_pages = NULL;
5870 heap->pooled_pages = NULL;
5871 if (!objspace->flags.immediate_sweep) {
5872 struct heap_page *page = NULL;
5873
5874 ccan_list_for_each(&heap->pages, page, page_node) {
5875 page->flags.before_sweep = TRUE;
5876 }
5877 }
5878}
5879
5880#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5881__attribute__((noinline))
5882#endif
5883
5884#if GC_CAN_COMPILE_COMPACTION
5885static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
5886static int compare_pinned_slots(const void *left, const void *right, void *d);
5887#endif
5888
5889static void
5890gc_sweep_start(rb_objspace_t *objspace)
5891{
5892 gc_mode_transition(objspace, gc_mode_sweeping);
5893 objspace->rincgc.pooled_slots = 0;
5894
5895#if GC_CAN_COMPILE_COMPACTION
5896 if (objspace->flags.during_compacting) {
5897 gc_sort_heap_by_compare_func(
5898 objspace,
5899 objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
5900 );
5901 }
5902#endif
5903
5904 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5905 rb_size_pool_t *size_pool = &size_pools[i];
5906 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5907
5908 gc_sweep_start_heap(objspace, heap);
5909
5910 /* We should call gc_sweep_finish_size_pool for size pools with no pages. */
5911 if (heap->sweeping_page == NULL) {
5912 GC_ASSERT(heap->total_pages == 0);
5913 GC_ASSERT(heap->total_slots == 0);
5914 gc_sweep_finish_size_pool(objspace, size_pool);
5915 }
5916 }
5917
5918 rb_ractor_t *r = NULL;
5919 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5920 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5921 }
5922}
5923
5924static void
5925gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
5926{
5927 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5928 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5929 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5930 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5931
5932 size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools];
5933 size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
5934
5935 /* If we don't have enough slots and we have pages on the tomb heap, move
5936 * pages from the tomb heap to the eden heap. This may prevent page
5937 * creation thrashing (frequently allocating and deallocating pages) and
5938 * GC thrashing (running GC more frequently than required). */
5939 struct heap_page *resurrected_page;
5940 while (swept_slots < min_free_slots &&
5941 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5942 swept_slots += resurrected_page->free_slots;
5943
5944 heap_add_page(objspace, size_pool, heap, resurrected_page);
5945 heap_add_freepage(heap, resurrected_page);
5946 }
5947
5948 if (swept_slots < min_free_slots) {
5949 bool grow_heap = is_full_marking(objspace);
5950
5951 /* Consider growing or starting a major GC if we are not currently in a
5952 * major GC and we can't allocate any more pages. */
5953 if (!is_full_marking(objspace) && size_pool->allocatable_pages == 0) {
5954 /* The heap is a growth heap if it freed more slots than had empty slots. */
5955 bool is_growth_heap = size_pool->empty_slots == 0 || size_pool->freed_slots > size_pool->empty_slots;
5956
5957 /* Grow this heap if we haven't run at least RVALUE_OLD_AGE minor
5958 * GCs since the last major GC, or if this heap is smaller than
5959 * the configured initial size. */
5960 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE ||
5961 total_slots < init_slots) {
5962 grow_heap = TRUE;
5963 }
5964 else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
5965 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5966 size_pool->force_major_gc_count++;
5967 }
5968 }
5969
5970 if (grow_heap) {
5971 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5972
5973 if (extend_page_count > size_pool->allocatable_pages) {
5974 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5975 }
5976 }
5977 }
5978}
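
/* -- Editorial sketch, not part of gc.c: the check above compares the slots
 * recovered by sweeping against
 *     min_free_slots = MAX(total_slots, init_slots) * heap_free_slots_min_ratio
 * and either grows the size pool or requests a major GC when too little was
 * recovered.  The numbers and the 0.20 ratio below are assumptions. */
#if 0 /* illustrative example only */
#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    size_t total_slots = 20000;    /* eden + tomb slots of this size pool */
    size_t init_slots  = 10000;    /* configured initial slots (assumed)  */
    double min_ratio   = 0.20;     /* assumed heap_free_slots_min_ratio   */
    size_t swept_slots = 3000;     /* freed + empty slots after sweeping  */

    size_t base = total_slots > init_slots ? total_slots : init_slots;
    size_t min_free_slots = (size_t)(base * min_ratio);        /* 4000 */

    if (swept_slots < min_free_slots) {
        printf("only %zu of %zu required free slots: grow the pool or "
               "schedule a major GC\n", swept_slots, min_free_slots);
    }
    return 0;
}
#endif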
5979
5980static void
5981gc_sweep_finish(rb_objspace_t *objspace)
5982{
5983 gc_report(1, objspace, "gc_sweep_finish\n");
5984
5985 gc_prof_set_heap_info(objspace);
5986 heap_pages_free_unused_pages(objspace);
5987
5988 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5989 rb_size_pool_t *size_pool = &size_pools[i];
5990
5991 /* if heap_pages has unused pages, then assign them to increment */
5992 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5993 if (size_pool->allocatable_pages < tomb_pages) {
5994 size_pool->allocatable_pages = tomb_pages;
5995 }
5996
5997 size_pool->freed_slots = 0;
5998 size_pool->empty_slots = 0;
5999
6000 if (!will_be_incremental_marking(objspace)) {
6001 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
6002 struct heap_page *end_page = eden_heap->free_pages;
6003 if (end_page) {
6004 while (end_page->free_next) end_page = end_page->free_next;
6005 end_page->free_next = eden_heap->pooled_pages;
6006 }
6007 else {
6008 eden_heap->free_pages = eden_heap->pooled_pages;
6009 }
6010 eden_heap->pooled_pages = NULL;
6011 objspace->rincgc.pooled_slots = 0;
6012 }
6013 }
6014 heap_pages_expand_sorted(objspace);
6015
6016 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
6017 gc_mode_transition(objspace, gc_mode_none);
6018
6019#if RGENGC_CHECK_MODE >= 2
6020 gc_verify_internal_consistency(objspace);
6021#endif
6022}
6023
6024static int
6025gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
6026{
6027 struct heap_page *sweep_page = heap->sweeping_page;
6028 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
6029 int swept_slots = 0;
6030 int pooled_slots = 0;
6031
6032 if (sweep_page == NULL) return FALSE;
6033
6034#if GC_ENABLE_LAZY_SWEEP
6035 gc_prof_sweep_timer_start(objspace);
6036#endif
6037
6038 do {
6039 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
6040
6041 struct gc_sweep_context ctx = {
6042 .page = sweep_page,
6043 .final_slots = 0,
6044 .freed_slots = 0,
6045 .empty_slots = 0,
6046 };
6047 gc_sweep_page(objspace, heap, &ctx);
6048 int free_slots = ctx.freed_slots + ctx.empty_slots;
6049
6050 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
6051
6052 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
6053 heap_pages_freeable_pages > 0 &&
6054 unlink_limit > 0) {
6055 heap_pages_freeable_pages--;
6056 unlink_limit--;
6057 /* there are no living objects -> move this page to tomb heap */
6058 heap_unlink_page(objspace, heap, sweep_page);
6059 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
6060 }
6061 else if (free_slots > 0) {
6062 size_pool->freed_slots += ctx.freed_slots;
6063 size_pool->empty_slots += ctx.empty_slots;
6064
6065 if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
6066 heap_add_poolpage(objspace, heap, sweep_page);
6067 pooled_slots += free_slots;
6068 }
6069 else {
6070 heap_add_freepage(heap, sweep_page);
6071 swept_slots += free_slots;
6072 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6073 break;
6074 }
6075 }
6076 }
6077 else {
6078 sweep_page->free_next = NULL;
6079 }
6080 } while ((sweep_page = heap->sweeping_page));
6081
6082 if (!heap->sweeping_page) {
6083 gc_sweep_finish_size_pool(objspace, size_pool);
6084
6085 if (!has_sweeping_pages(objspace)) {
6086 gc_sweep_finish(objspace);
6087 }
6088 }
6089
6090#if GC_ENABLE_LAZY_SWEEP
6091 gc_prof_sweep_timer_stop(objspace);
6092#endif
6093
6094 return heap->free_pages != NULL;
6095}
6096
6097static void
6098gc_sweep_rest(rb_objspace_t *objspace)
6099{
6100 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6101 rb_size_pool_t *size_pool = &size_pools[i];
6102
6103 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6104 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6105 }
6106 }
6107}
6108
6109static void
6110gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
6111{
6112 GC_ASSERT(dont_gc_val() == FALSE);
6113 if (!GC_ENABLE_LAZY_SWEEP) return;
6114
6115 gc_sweeping_enter(objspace);
6116
6117 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6118 rb_size_pool_t *size_pool = &size_pools[i];
6119 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6120 /* sweep_size_pool requires a free slot but sweeping did not yield any. */
6121 if (size_pool == sweep_size_pool) {
6122 if (size_pool->allocatable_pages > 0) {
6123 heap_increment(objspace, size_pool, heap);
6124 }
6125 else {
6126 /* Not allowed to create a new page so finish sweeping. */
6127 gc_sweep_rest(objspace);
6128 break;
6129 }
6130 }
6131 }
6132 }
6133
6134 gc_sweeping_exit(objspace);
6135}
6136
6137#if GC_CAN_COMPILE_COMPACTION
6138static void
6139invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
6140{
6141 if (bitset) {
6142 do {
6143 if (bitset & 1) {
6144 VALUE forwarding_object = (VALUE)p;
6145 VALUE object;
6146
6147 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
6148 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6149 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6150
6151 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6152
6153 object = rb_gc_location(forwarding_object);
6154
6155 shape_id_t original_shape_id = 0;
6156 if (RB_TYPE_P(object, T_OBJECT)) {
6157 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6158 }
6159
6160 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6161 /* forwarding_object is now our actual object, and "object"
6162 * is the free slot for the original page */
6163
6164 if (original_shape_id) {
6165 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6166 }
6167
6168 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6169 orig_page->free_slots++;
6170 heap_page_add_freeobj(objspace, orig_page, object);
6171
6172 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6173 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
6174 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
6175 }
6176 }
6177 p += BASE_SLOT_SIZE;
6178 bitset >>= 1;
6179 } while (bitset);
6180 }
6181}
6182
6183static void
6184invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
6185{
6186 int i;
6187 bits_t *mark_bits, *pin_bits;
6188 bits_t bitset;
6189
6190 mark_bits = page->mark_bits;
6191 pin_bits = page->pinned_bits;
6192
6193 uintptr_t p = page->start;
6194
6195 // Skip out of range slots at the head of the page
6196 bitset = pin_bits[0] & ~mark_bits[0];
6197 bitset >>= NUM_IN_PAGE(p);
6198 invalidate_moved_plane(objspace, page, p, bitset);
6199 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6200
6201 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6202 /* Moved objects are pinned but never marked. We reuse the pin bits
6203 * to indicate there is a moved object in this slot. */
6204 bitset = pin_bits[i] & ~mark_bits[i];
6205
6206 invalidate_moved_plane(objspace, page, p, bitset);
6207 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6208 }
6209}
6210#endif
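
/* -- Editorial sketch, not part of gc.c: invalidate_moved_page() above finds
 * forwarding objects by combining two bitmaps.  T_MOVED slots are pinned but
 * never marked, so `pinned & ~marked` selects exactly those slots.  The 8-bit
 * values below are hypothetical. */
#if 0 /* illustrative example only */
#include <stdint.h>
#include <assert.h>

int
main(void)
{
    uint8_t marked = 0x5A;                 /* live objects                       */
    uint8_t pinned = 0x1E;                 /* pinned slots, including T_MOVED    */
    uint8_t moved  = pinned & (uint8_t)~marked;
    assert(moved == 0x04);                 /* only bit 2 is pinned-but-unmarked  */
    return 0;
}
#endif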
6211
6212static void
6213gc_compact_start(rb_objspace_t *objspace)
6214{
6215 struct heap_page *page = NULL;
6216 gc_mode_transition(objspace, gc_mode_compacting);
6217
6218 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6219 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6220 ccan_list_for_each(&heap->pages, page, page_node) {
6221 page->flags.before_sweep = TRUE;
6222 }
6223
6224 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6225 heap->compact_cursor_index = 0;
6226 }
6227
6228 if (gc_prof_enabled(objspace)) {
6229 gc_profile_record *record = gc_prof_record(objspace);
6230 record->moved_objects = objspace->rcompactor.total_moved;
6231 }
6232
6233 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6234 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6235 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6236 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6237
6238 /* Set up read barrier for pages containing MOVED objects */
6239 install_handlers();
6240}
6241
6242static void gc_sweep_compact(rb_objspace_t *objspace);
6243
6244static void
6245gc_sweep(rb_objspace_t *objspace)
6246{
6247 gc_sweeping_enter(objspace);
6248
6249 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6250
6251 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6252
6253 gc_sweep_start(objspace);
6254 if (objspace->flags.during_compacting) {
6255 gc_sweep_compact(objspace);
6256 }
6257
6258 if (immediate_sweep) {
6259#if !GC_ENABLE_LAZY_SWEEP
6260 gc_prof_sweep_timer_start(objspace);
6261#endif
6262 gc_sweep_rest(objspace);
6263#if !GC_ENABLE_LAZY_SWEEP
6264 gc_prof_sweep_timer_stop(objspace);
6265#endif
6266 }
6267 else {
6268
6269 /* Sweep every size pool. */
6270 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6271 rb_size_pool_t *size_pool = &size_pools[i];
6272 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6273 }
6274 }
6275
6276 gc_sweeping_exit(objspace);
6277}
6278
6279/* Marking - Marking stack */
6280
6281static stack_chunk_t *
6282stack_chunk_alloc(void)
6283{
6284 stack_chunk_t *res;
6285
6286 res = malloc(sizeof(stack_chunk_t));
6287 if (!res)
6288 rb_memerror();
6289
6290 return res;
6291}
6292
6293static inline int
6294is_mark_stack_empty(mark_stack_t *stack)
6295{
6296 return stack->chunk == NULL;
6297}
6298
6299static size_t
6300mark_stack_size(mark_stack_t *stack)
6301{
6302 size_t size = stack->index;
6303 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6304
6305 while (chunk) {
6306 size += stack->limit;
6307 chunk = chunk->next;
6308 }
6309 return size;
6310}
6311
6312static void
6313add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
6314{
6315 chunk->next = stack->cache;
6316 stack->cache = chunk;
6317 stack->cache_size++;
6318}
6319
6320static void
6321shrink_stack_chunk_cache(mark_stack_t *stack)
6322{
6323 stack_chunk_t *chunk;
6324
6325 if (stack->unused_cache_size > (stack->cache_size/2)) {
6326 chunk = stack->cache;
6327 stack->cache = stack->cache->next;
6328 stack->cache_size--;
6329 free(chunk);
6330 }
6331 stack->unused_cache_size = stack->cache_size;
6332}
6333
6334static void
6335push_mark_stack_chunk(mark_stack_t *stack)
6336{
6337 stack_chunk_t *next;
6338
6339 GC_ASSERT(stack->index == stack->limit);
6340
6341 if (stack->cache_size > 0) {
6342 next = stack->cache;
6343 stack->cache = stack->cache->next;
6344 stack->cache_size--;
6345 if (stack->unused_cache_size > stack->cache_size)
6346 stack->unused_cache_size = stack->cache_size;
6347 }
6348 else {
6349 next = stack_chunk_alloc();
6350 }
6351 next->next = stack->chunk;
6352 stack->chunk = next;
6353 stack->index = 0;
6354}
6355
6356static void
6357pop_mark_stack_chunk(mark_stack_t *stack)
6358{
6359 stack_chunk_t *prev;
6360
6361 prev = stack->chunk->next;
6362 GC_ASSERT(stack->index == 0);
6363 add_stack_chunk_cache(stack, stack->chunk);
6364 stack->chunk = prev;
6365 stack->index = stack->limit;
6366}
6367
6368static void
6369mark_stack_chunk_list_free(stack_chunk_t *chunk)
6370{
6371 stack_chunk_t *next = NULL;
6372
6373 while (chunk != NULL) {
6374 next = chunk->next;
6375 free(chunk);
6376 chunk = next;
6377 }
6378}
6379
6380static void
6381free_stack_chunks(mark_stack_t *stack)
6382{
6383 mark_stack_chunk_list_free(stack->chunk);
6384}
6385
6386static void
6387mark_stack_free_cache(mark_stack_t *stack)
6388{
6389 mark_stack_chunk_list_free(stack->cache);
6390 stack->cache_size = 0;
6391 stack->unused_cache_size = 0;
6392}
6393
6394static void
6395push_mark_stack(mark_stack_t *stack, VALUE data)
6396{
6397 VALUE obj = data;
6398 switch (BUILTIN_TYPE(obj)) {
6399 case T_OBJECT:
6400 case T_CLASS:
6401 case T_MODULE:
6402 case T_FLOAT:
6403 case T_STRING:
6404 case T_REGEXP:
6405 case T_ARRAY:
6406 case T_HASH:
6407 case T_STRUCT:
6408 case T_BIGNUM:
6409 case T_FILE:
6410 case T_DATA:
6411 case T_MATCH:
6412 case T_COMPLEX:
6413 case T_RATIONAL:
6414 case T_TRUE:
6415 case T_FALSE:
6416 case T_SYMBOL:
6417 case T_IMEMO:
6418 case T_ICLASS:
6419 if (stack->index == stack->limit) {
6420 push_mark_stack_chunk(stack);
6421 }
6422 stack->chunk->data[stack->index++] = data;
6423 return;
6424
6425 case T_NONE:
6426 case T_NIL:
6427 case T_FIXNUM:
6428 case T_MOVED:
6429 case T_ZOMBIE:
6430 case T_UNDEF:
6431 case T_MASK:
6432 rb_bug("push_mark_stack() called for broken object");
6433 break;
6434
6435 case T_NODE:
6436 UNEXPECTED_NODE(push_mark_stack);
6437 break;
6438 }
6439
6440 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6441 BUILTIN_TYPE(obj), (void *)data,
6442 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6443}
6444
6445static int
6446pop_mark_stack(mark_stack_t *stack, VALUE *data)
6447{
6448 if (is_mark_stack_empty(stack)) {
6449 return FALSE;
6450 }
6451 if (stack->index == 1) {
6452 *data = stack->chunk->data[--stack->index];
6453 pop_mark_stack_chunk(stack);
6454 }
6455 else {
6456 *data = stack->chunk->data[--stack->index];
6457 }
6458 return TRUE;
6459}
6460
6461static void
6462init_mark_stack(mark_stack_t *stack)
6463{
6464 int i;
6465
6466 MEMZERO(stack, mark_stack_t, 1);
6467 stack->index = stack->limit = STACK_CHUNK_SIZE;
6468
6469 for (i=0; i < 4; i++) {
6470 add_stack_chunk_cache(stack, stack_chunk_alloc());
6471 }
6472 stack->unused_cache_size = stack->cache_size;
6473}
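
/* -- Editorial sketch, not part of gc.c: the mark stack above is a singly
 * linked list of fixed-size chunks, with a small cache of spare chunks
 * (init_mark_stack() pre-caches four).  A stripped-down model without the
 * cache and without OOM handling (gc.c calls rb_memerror()), using
 * hypothetical names and a chunk size of 4: */
#if 0 /* illustrative example only */
#include <stdlib.h>
#include <assert.h>

#define EX_CHUNK_SIZE 4

typedef struct ex_chunk {
    struct ex_chunk *next;
    unsigned long data[EX_CHUNK_SIZE];
} ex_chunk;

typedef struct { ex_chunk *chunk; size_t index; } ex_stack;

static void
ex_push(ex_stack *s, unsigned long v)
{
    if (!s->chunk || s->index == EX_CHUNK_SIZE) {   /* current chunk is full */
        ex_chunk *c = malloc(sizeof(*c));
        c->next = s->chunk;
        s->chunk = c;
        s->index = 0;
    }
    s->chunk->data[s->index++] = v;
}

static int
ex_pop(ex_stack *s, unsigned long *v)
{
    if (!s->chunk) return 0;
    *v = s->chunk->data[--s->index];
    if (s->index == 0) {                            /* chunk exhausted: unlink it */
        ex_chunk *c = s->chunk;
        s->chunk = c->next;
        s->index = s->chunk ? EX_CHUNK_SIZE : 0;
        free(c);
    }
    return 1;
}

int
main(void)
{
    ex_stack s = { NULL, 0 };
    unsigned long v, expect = 9;
    for (unsigned long i = 0; i < 10; i++) ex_push(&s, i);
    while (ex_pop(&s, &v)) assert(v == expect--);
    return 0;
}
#endif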
6474
6475/* Marking */
6476
6477#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6478
6479#define STACK_START (ec->machine.stack_start)
6480#define STACK_END (ec->machine.stack_end)
6481#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6482
6483#if STACK_GROW_DIRECTION < 0
6484# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6485#elif STACK_GROW_DIRECTION > 0
6486# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6487#else
6488# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6489 : (size_t)(STACK_END - STACK_START + 1))
6490#endif
6491#if !STACK_GROW_DIRECTION
6492int ruby_stack_grow_direction;
6493int
6494ruby_get_stack_grow_direction(volatile VALUE *addr)
6495{
6496 VALUE *end;
6497 SET_MACHINE_STACK_END(&end);
6498
6499 if (end > addr) return ruby_stack_grow_direction = 1;
6500 return ruby_stack_grow_direction = -1;
6501}
6502#endif
6503
6504size_t
6505ruby_stack_length(VALUE **p)
6506{
6507 rb_execution_context_t *ec = GET_EC();
6508 SET_STACK_END;
6509 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6510 return STACK_LENGTH;
6511}
6512
6513#define PREVENT_STACK_OVERFLOW 1
6514#ifndef PREVENT_STACK_OVERFLOW
6515#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6516# define PREVENT_STACK_OVERFLOW 1
6517#else
6518# define PREVENT_STACK_OVERFLOW 0
6519#endif
6520#endif
6521#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6522static int
6523stack_check(rb_execution_context_t *ec, int water_mark)
6524{
6525 SET_STACK_END;
6526
6527 size_t length = STACK_LENGTH;
6528 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6529
6530 return length > maximum_length;
6531}
6532#else
6533#define stack_check(ec, water_mark) FALSE
6534#endif
6535
6536#define STACKFRAME_FOR_CALL_CFUNC 2048
6537
6538int
6539rb_ec_stack_check(rb_execution_context_t *ec)
6540{
6541 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6542}
6543
6544int
6545ruby_stack_check(void)
6546{
6547 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6548}
6549
6550ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
6551static void
6552each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
6553{
6554 VALUE v;
6555 while (n--) {
6556 v = *x;
6557 cb(objspace, v);
6558 x++;
6559 }
6560}
6561
6562static void
6563gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
6564{
6565 long n;
6566
6567 if (end <= start) return;
6568 n = end - start;
6569 each_location(objspace, start, n, cb);
6570}
6571
6572void
6573rb_gc_mark_locations(const VALUE *start, const VALUE *end)
6574{
6575 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6576}
6577
6578void
6579rb_gc_mark_values(long n, const VALUE *values)
6580{
6581 long i;
6582 rb_objspace_t *objspace = &rb_objspace;
6583
6584 for (i=0; i<n; i++) {
6585 gc_mark(objspace, values[i]);
6586 }
6587}
6588
6589static void
6590gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
6591{
6592 long i;
6593
6594 for (i=0; i<n; i++) {
6595 if (is_markable_object(values[i])) {
6596 gc_mark_and_pin(objspace, values[i]);
6597 }
6598 }
6599}
6600
6601void
6602rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6603{
6604 rb_objspace_t *objspace = &rb_objspace;
6605 gc_mark_stack_values(objspace, n, values);
6606}
6607
6608static int
6609mark_value(st_data_t key, st_data_t value, st_data_t data)
6610{
6611 rb_objspace_t *objspace = (rb_objspace_t *)data;
6612 gc_mark(objspace, (VALUE)value);
6613 return ST_CONTINUE;
6614}
6615
6616static int
6617mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6618{
6619 rb_objspace_t *objspace = (rb_objspace_t *)data;
6620 gc_mark_and_pin(objspace, (VALUE)value);
6621 return ST_CONTINUE;
6622}
6623
6624static void
6625mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
6626{
6627 if (!tbl || tbl->num_entries == 0) return;
6628 st_foreach(tbl, mark_value, (st_data_t)objspace);
6629}
6630
6631static void
6632mark_tbl(rb_objspace_t *objspace, st_table *tbl)
6633{
6634 if (!tbl || tbl->num_entries == 0) return;
6635 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6636}
6637
6638static int
6639mark_key(st_data_t key, st_data_t value, st_data_t data)
6640{
6641 rb_objspace_t *objspace = (rb_objspace_t *)data;
6642 gc_mark_and_pin(objspace, (VALUE)key);
6643 return ST_CONTINUE;
6644}
6645
6646static void
6647mark_set(rb_objspace_t *objspace, st_table *tbl)
6648{
6649 if (!tbl) return;
6650 st_foreach(tbl, mark_key, (st_data_t)objspace);
6651}
6652
6653static int
6654pin_value(st_data_t key, st_data_t value, st_data_t data)
6655{
6656 rb_objspace_t *objspace = (rb_objspace_t *)data;
6657 gc_mark_and_pin(objspace, (VALUE)value);
6658 return ST_CONTINUE;
6659}
6660
6661static void
6662mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
6663{
6664 if (!tbl) return;
6665 st_foreach(tbl, pin_value, (st_data_t)objspace);
6666}
6667
6668void
6669rb_mark_set(st_table *tbl)
6670{
6671 mark_set(&rb_objspace, tbl);
6672}
6673
6674static int
6675mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6676{
6677 rb_objspace_t *objspace = (rb_objspace_t *)data;
6678
6679 gc_mark(objspace, (VALUE)key);
6680 gc_mark(objspace, (VALUE)value);
6681 return ST_CONTINUE;
6682}
6683
6684static int
6685pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6686{
6687 rb_objspace_t *objspace = (rb_objspace_t *)data;
6688
6689 gc_mark_and_pin(objspace, (VALUE)key);
6690 gc_mark_and_pin(objspace, (VALUE)value);
6691 return ST_CONTINUE;
6692}
6693
6694static int
6695pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6696{
6697 rb_objspace_t *objspace = (rb_objspace_t *)data;
6698
6699 gc_mark_and_pin(objspace, (VALUE)key);
6700 gc_mark(objspace, (VALUE)value);
6701 return ST_CONTINUE;
6702}
6703
6704static void
6705mark_hash(rb_objspace_t *objspace, VALUE hash)
6706{
6707 if (rb_hash_compare_by_id_p(hash)) {
6708 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6709 }
6710 else {
6711 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6712 }
6713
6714 gc_mark(objspace, RHASH(hash)->ifnone);
6715}
6716
6717static void
6718mark_st(rb_objspace_t *objspace, st_table *tbl)
6719{
6720 if (!tbl) return;
6721 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6722}
6723
6724void
6725rb_mark_hash(st_table *tbl)
6726{
6727 mark_st(&rb_objspace, tbl);
6728}
6729
6730static void
6731mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
6732{
6733 const rb_method_definition_t *def = me->def;
6734
6735 gc_mark(objspace, me->owner);
6736 gc_mark(objspace, me->defined_class);
6737
6738 if (def) {
6739 switch (def->type) {
6740 case VM_METHOD_TYPE_ISEQ:
6741 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
6742 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6743
6744 if (def->iseq_overload && me->defined_class) {
6745 // it can be a key of "overloaded_cme" table
6746 // so it should be pinned.
6747 gc_mark_and_pin(objspace, (VALUE)me);
6748 }
6749 break;
6750 case VM_METHOD_TYPE_ATTRSET:
6751 case VM_METHOD_TYPE_IVAR:
6752 gc_mark(objspace, def->body.attr.location);
6753 break;
6754 case VM_METHOD_TYPE_BMETHOD:
6755 gc_mark(objspace, def->body.bmethod.proc);
6756 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6757 break;
6758 case VM_METHOD_TYPE_ALIAS:
6759 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6760 return;
6761 case VM_METHOD_TYPE_REFINED:
6762 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6763 break;
6764 case VM_METHOD_TYPE_CFUNC:
6765 case VM_METHOD_TYPE_ZSUPER:
6766 case VM_METHOD_TYPE_MISSING:
6767 case VM_METHOD_TYPE_OPTIMIZED:
6768 case VM_METHOD_TYPE_UNDEF:
6769 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6770 break;
6771 }
6772 }
6773}
6774
6775static enum rb_id_table_iterator_result
6776mark_method_entry_i(VALUE me, void *data)
6777{
6778 rb_objspace_t *objspace = (rb_objspace_t *)data;
6779
6780 gc_mark(objspace, me);
6781 return ID_TABLE_CONTINUE;
6782}
6783
6784static void
6785mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6786{
6787 if (tbl) {
6788 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6789 }
6790}
6791
6792static enum rb_id_table_iterator_result
6793mark_const_entry_i(VALUE value, void *data)
6794{
6795 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
6796 rb_objspace_t *objspace = data;
6797
6798 gc_mark(objspace, ce->value);
6799 gc_mark(objspace, ce->file);
6800 return ID_TABLE_CONTINUE;
6801}
6802
6803static void
6804mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6805{
6806 if (!tbl) return;
6807 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6808}
6809
6810#if STACK_GROW_DIRECTION < 0
6811#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6812#elif STACK_GROW_DIRECTION > 0
6813#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6814#else
6815#define GET_STACK_BOUNDS(start, end, appendix) \
6816 ((STACK_END < STACK_START) ? \
6817 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6818#endif
6819
6820static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6821 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
6822
6823#if defined(__wasm__)
6824
6825
6826static VALUE *rb_stack_range_tmp[2];
6827
6828static void
6829rb_mark_locations(void *begin, void *end)
6830{
6831 rb_stack_range_tmp[0] = begin;
6832 rb_stack_range_tmp[1] = end;
6833}
6834
6835# if defined(__EMSCRIPTEN__)
6836
6837static void
6838mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6839{
6840 emscripten_scan_stack(rb_mark_locations);
6841 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6842
6843 emscripten_scan_registers(rb_mark_locations);
6844 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6845}
6846# else // use Asyncify version
6847
6848static void
6849mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6850{
6851 VALUE *stack_start, *stack_end;
6852 SET_STACK_END;
6853 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6854 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6855
6856 rb_wasm_scan_locals(rb_mark_locations);
6857 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6858}
6859
6860# endif
6861
6862#else // !defined(__wasm__)
6863
6864static void
6865mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6866{
6867 union {
6868 rb_jmp_buf j;
6869 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6870 } save_regs_gc_mark;
6871 VALUE *stack_start, *stack_end;
6872
6873 FLUSH_REGISTER_WINDOWS;
6874 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6875 /* This assumes that all registers are saved into the jmp_buf (and stack) */
6876 rb_setjmp(save_regs_gc_mark.j);
6877
6878 /* SET_STACK_END must be called in this function because
6879 * the stack frame of this function may contain
6880 * callee save registers and they should be marked. */
6881 SET_STACK_END;
6882 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6883
6884 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6885
6886 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6887}
6888#endif
6889
6890static void
6891each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
6892{
6893 rb_objspace_t *objspace = &rb_objspace;
6894 VALUE *stack_start, *stack_end;
6895
6896 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6897 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
6898 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6899}
6900
6901void
6902rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
6903{
6904 each_machine_stack_value(ec, gc_mark_maybe);
6905}
6906
6907static void
6908each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6909 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
6910{
6911
6912 gc_mark_locations(objspace, stack_start, stack_end, cb);
6913
6914#if defined(__mc68000__)
6915 gc_mark_locations(objspace,
6916 (VALUE*)((char*)stack_start + 2),
6917 (VALUE*)((char*)stack_end - 2), cb);
6918#endif
6919}
6920
6921void
6922rb_mark_tbl(st_table *tbl)
6923{
6924 mark_tbl(&rb_objspace, tbl);
6925}
6926
6927void
6928rb_mark_tbl_no_pin(st_table *tbl)
6929{
6930 mark_tbl_no_pin(&rb_objspace, tbl);
6931}
6932
6933static void
6934gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
6935{
6936 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6937
6938 if (is_pointer_to_heap(objspace, (void *)obj)) {
6939 void *ptr = asan_unpoison_object_temporary(obj);
6940
6941 /* Garbage can live on the stack, so do not mark or pin */
6942 switch (BUILTIN_TYPE(obj)) {
6943 case T_ZOMBIE:
6944 case T_NONE:
6945 break;
6946 default:
6947 gc_mark_and_pin(objspace, obj);
6948 break;
6949 }
6950
6951 if (ptr) {
6952 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6953 asan_poison_object(obj);
6954 }
6955 }
6956}
6957
6958void
6959rb_gc_mark_maybe(VALUE obj)
6960{
6961 gc_mark_maybe(&rb_objspace, obj);
6962}
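
/* -- Editorial sketch, not part of gc.c: rb_gc_mark_maybe() is the
 * conservative entry point -- the argument may or may not be an object
 * reference, and it is marked (and pinned) only if it points at a live heap
 * slot.  A C extension scanning a buffer that may contain object words could
 * call it from its mark callback; the type and names below are hypothetical. */
#if 0 /* illustrative example only */
#include "ruby.h"

struct scratch_buffer {
    long len;
    VALUE *words;                          /* may hold VALUEs or plain integers  */
};

static void
scratch_buffer_mark(void *ptr)
{
    struct scratch_buffer *buf = ptr;
    for (long i = 0; i < buf->len; i++) {
        rb_gc_mark_maybe(buf->words[i]);   /* ignored unless it is a heap object */
    }
}
#endif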
6963
6964static inline int
6965gc_mark_set(rb_objspace_t *objspace, VALUE obj)
6966{
6967 ASSERT_vm_locking();
6968 if (RVALUE_MARKED(obj)) return 0;
6969 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6970 return 1;
6971}
6972
6973static int
6974gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
6975{
6976 struct heap_page *page = GET_HEAP_PAGE(obj);
6977 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6978
6979 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6980 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
6981 MARK_IN_BITMAP(uncollectible_bits, obj);
6982 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6983
6984#if RGENGC_PROFILE > 0
6985 objspace->profile.total_remembered_shady_object_count++;
6986#if RGENGC_PROFILE >= 2
6987 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6988#endif
6989#endif
6990 return TRUE;
6991 }
6992 else {
6993 return FALSE;
6994 }
6995}
6996
6997static void
6998rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
6999{
7000 const VALUE old_parent = objspace->rgengc.parent_object;
7001
7002 if (old_parent) { /* parent object is old */
7003 if (RVALUE_WB_UNPROTECTED(obj) || !RVALUE_OLD_P(obj)) {
7004 rgengc_remember(objspace, old_parent);
7005 }
7006 }
7007
7008 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
7009}
7010
7011static void
7012gc_grey(rb_objspace_t *objspace, VALUE obj)
7013{
7014#if RGENGC_CHECK_MODE
7015 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7016 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7017#endif
7018
7019 if (is_incremental_marking(objspace)) {
7020 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7021 }
7022
7023 push_mark_stack(&objspace->mark_stack, obj);
7024}
7025
7026static void
7027gc_aging(rb_objspace_t *objspace, VALUE obj)
7028{
7029 struct heap_page *page = GET_HEAP_PAGE(obj);
7030
7031 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7032 check_rvalue_consistency(obj);
7033
7034 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7035 if (!RVALUE_OLD_P(obj)) {
7036 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
7037 RVALUE_AGE_INC(objspace, obj);
7038 }
7039 else if (is_full_marking(objspace)) {
7040 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7041 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7042 }
7043 }
7044 check_rvalue_consistency(obj);
7045
7046 objspace->marked_slots++;
7047}
7048
7049NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
7050static void reachable_objects_from_callback(VALUE obj);
7051
7052static void
7053gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
7054{
7055 if (LIKELY(during_gc)) {
7056 rgengc_check_relation(objspace, obj);
7057 if (!gc_mark_set(objspace, obj)) return; /* already marked */
7058
7059 if (0) { // for debug GC marking miss
7060 if (objspace->rgengc.parent_object) {
7061 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7062 (void *)obj, obj_type_name(obj),
7063 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7064 }
7065 else {
7066 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7067 }
7068 }
7069
7070 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7071 rp(obj);
7072 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
7073 }
7074 gc_aging(objspace, obj);
7075 gc_grey(objspace, obj);
7076 }
7077 else {
7078 reachable_objects_from_callback(obj);
7079 }
7080}
7081
7082static inline void
7083gc_pin(rb_objspace_t *objspace, VALUE obj)
7084{
7085 GC_ASSERT(is_markable_object(obj));
7086 if (UNLIKELY(objspace->flags.during_compacting)) {
7087 if (LIKELY(during_gc)) {
7088 if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
7089 GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
7090 GET_HEAP_PAGE(obj)->pinned_slots++;
7091 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7092 }
7093 }
7094 }
7095}
7096
7097static inline void
7098gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
7099{
7100 if (!is_markable_object(obj)) return;
7101 gc_pin(objspace, obj);
7102 gc_mark_ptr(objspace, obj);
7103}
7104
7105static inline void
7106gc_mark(rb_objspace_t *objspace, VALUE obj)
7107{
7108 if (!is_markable_object(obj)) return;
7109 gc_mark_ptr(objspace, obj);
7110}
7111
7112void
7113rb_gc_mark_movable(VALUE ptr)
7114{
7115 gc_mark(&rb_objspace, ptr);
7116}
7117
7118void
7119rb_gc_mark(VALUE ptr)
7120{
7121 gc_mark_and_pin(&rb_objspace, ptr);
7122}
7123
7124void
7125rb_gc_mark_and_move(VALUE *ptr)
7126{
7127 rb_objspace_t *objspace = &rb_objspace;
7128 if (RB_SPECIAL_CONST_P(*ptr)) return;
7129
7130 if (UNLIKELY(objspace->flags.during_reference_updating)) {
7131 GC_ASSERT(objspace->flags.during_compacting);
7132 GC_ASSERT(during_gc);
7133
7134 *ptr = rb_gc_location(*ptr);
7135 }
7136 else {
7137 gc_mark_ptr(objspace, *ptr);
7138 }
7139}
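
/* -- Editorial sketch, not part of gc.c: the entry points above differ in how
 * they interact with compaction.  rb_gc_mark() pins the referenced object,
 * rb_gc_mark_movable() lets it move (the holder must then rewrite the
 * reference with rb_gc_location() in its dcompact callback), and
 * rb_gc_mark_and_move() marks or rewrites depending on the GC phase.  A
 * typical TypedData holder, with hypothetical names, might look like: */
#if 0 /* illustrative example only */
#include "ruby.h"

struct wrapper { VALUE held; };

static void
wrapper_mark(void *ptr)
{
    struct wrapper *w = ptr;
    rb_gc_mark_movable(w->held);        /* do not pin: allow compaction */
}

static void
wrapper_compact(void *ptr)
{
    struct wrapper *w = ptr;
    w->held = rb_gc_location(w->held);  /* follow the forwarding address */
}

static const rb_data_type_t wrapper_type = {
    "wrapper",
    { wrapper_mark, RUBY_TYPED_DEFAULT_FREE, NULL, wrapper_compact },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
#endif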
7140
7141void
7142rb_gc_mark_weak(VALUE *ptr)
7143{
7144 rb_objspace_t *objspace = &rb_objspace;
7145
7146 if (UNLIKELY(!during_gc)) return;
7147
7148 VALUE obj = *ptr;
7149 if (RB_SPECIAL_CONST_P(obj)) return;
7150
7151 GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
7152
7153 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7154 rp(obj);
7155 rb_bug("try to mark T_NONE object");
7156 }
7157
7158 /* If we are in a minor GC and the other object is old, then obj should
7159 * already be marked and cannot be reclaimed in this GC cycle so we don't
7160 * need to add it to the weak references list. */
7161 if (!is_full_marking(objspace) && RVALUE_OLD_P(obj)) {
7162 GC_ASSERT(RVALUE_MARKED(obj));
7163 GC_ASSERT(!objspace->flags.during_compacting);
7164
7165 return;
7166 }
7167
7168 rgengc_check_relation(objspace, obj);
7169
7170 rb_darray_append_without_gc(&objspace->weak_references, ptr);
7171
7172 objspace->profile.weak_references_count++;
7173}
7174
7175void
7176rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
7177{
7178 rb_objspace_t *objspace = &rb_objspace;
7179
7180 /* If we're not incremental marking, then the state of the objects can't
7181 * change so we don't need to do anything. */
7182 if (!is_incremental_marking(objspace)) return;
7183 /* If parent_obj has not been marked, then ptr has not yet been marked
7184 * weak, so we don't need to do anything. */
7185 if (!RVALUE_MARKED(parent_obj)) return;
7186
7187 VALUE **ptr_ptr;
7188 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
7189 if (*ptr_ptr == ptr) {
7190 *ptr_ptr = NULL;
7191 break;
7192 }
7193 }
7194}
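
/* -- Editorial sketch, not part of gc.c: rb_gc_mark_weak() and
 * rb_gc_remove_weak() above are internal APIs used by weak containers such as
 * ObjectSpace::WeakMap.  The holder registers the *address* of its reference
 * during marking; if the referent turns out to be dead, the GC clears that
 * slot after marking.  A hypothetical holder (simplified, names assumed): */
#if 0 /* illustrative example only */
#include "ruby.h"

struct weak_cell {
    VALUE self;      /* the wrapper object that owns this cell */
    VALUE target;    /* weakly referenced object               */
};

static void
weak_cell_mark(void *ptr)
{
    struct weak_cell *cell = ptr;
    rb_gc_mark_weak(&cell->target);     /* slot may be cleared if target dies */
}

static void
weak_cell_clear(struct weak_cell *cell)
{
    /* Dropping the reference while incremental marking is running requires
     * unregistering the address that was passed to rb_gc_mark_weak(). */
    rb_gc_remove_weak(cell->self, &cell->target);
    cell->target = Qnil;
}
#endif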
7195
7196/* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
7197 * It is intended solely for the GC_END_MARK timing.
7198 */
7199
7200int
7201rb_objspace_marked_object_p(VALUE obj)
7202{
7203 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7204}
7205
7206static inline void
7207gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
7208{
7209 if (RVALUE_OLD_P(obj)) {
7210 objspace->rgengc.parent_object = obj;
7211 }
7212 else {
7213 objspace->rgengc.parent_object = Qfalse;
7214 }
7215}
7216
7217static void
7218gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
7219{
7220 switch (imemo_type(obj)) {
7221 case imemo_env:
7222 {
7223 const rb_env_t *env = (const rb_env_t *)obj;
7224
7225 if (LIKELY(env->ep)) {
7226 // ep can be NULL just after newobj().
7227 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7228 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7229 rb_gc_mark_values((long)env->env_size, env->env);
7230 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7231 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7232 gc_mark(objspace, (VALUE)env->iseq);
7233 }
7234 }
7235 return;
7236 case imemo_cref:
7237 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7238 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7239 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7240 return;
7241 case imemo_svar:
7242 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7243 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7244 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7245 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7246 return;
7247 case imemo_throw_data:
7248 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7249 return;
7250 case imemo_ifunc:
7251 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7252 return;
7253 case imemo_memo:
7254 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7255 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7256 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7257 return;
7258 case imemo_ment:
7259 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7260 return;
7261 case imemo_iseq:
7262 rb_iseq_mark_and_move((rb_iseq_t *)obj, false);
7263 return;
7264 case imemo_tmpbuf:
7265 {
7266 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
7267 do {
7268 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7269 } while ((m = m->next) != NULL);
7270 }
7271 return;
7272 case imemo_ast:
7273 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7274 return;
7275 case imemo_parser_strterm:
7276 return;
7277 case imemo_callinfo:
7278 return;
7279 case imemo_callcache:
7280 /* cc is a callcache.
7281 *
7282 * cc->klass (klass) should not be marked because if the klass is
7283 * freed, cc->klass is cleared by `vm_cc_invalidate()`.
7284 *
7285 * cc->cme (cme) should not be marked because the cc is invalidated
7286 * when the cme is freed.
7287 * - klass marks cme if klass uses cme.
7288 * - the caller class's ccs->cme marks cc->cme.
7289 * - when the klass no longer refers to the cc, the cc is
7290 * invalidated by `vm_cc_invalidate()` and cc->cme will not
7291 * be accessed afterwards.
7292 * - with multiple Ractors, the cme is only collected by a global GC,
7293 * so accessing cc and cme is safe as long as GC does not
7294 * interleave with that access.
7295 * - However, cc_type_super and cc_type_refinement are not chained
7296 * from ccs, so cc->cme should be marked; the cme might be
7297 * reachable only through the cc in these cases.
7298 */
7299 {
7300 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
7301 if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
7302 gc_mark(objspace, (VALUE)cc->cme_);
7303 }
7304 }
7305 return;
7306 case imemo_constcache:
7307 {
7308 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
7309 gc_mark(objspace, ice->value);
7310 }
7311 return;
7312#if VM_CHECK_MODE > 0
7313 default:
7314 VM_UNREACHABLE(gc_mark_imemo);
7315#endif
7316 }
7317}
7318
7319static bool
7320gc_declarative_marking_p(const rb_data_type_t *type)
7321{
7322 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
7323}
7324
7325static void mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass);
7326
7327static void
7328gc_mark_children(rb_objspace_t *objspace, VALUE obj)
7329{
7330 register RVALUE *any = RANY(obj);
7331 gc_mark_set_parent(objspace, obj);
7332
7333 if (FL_TEST(obj, FL_EXIVAR)) {
7334 rb_mark_generic_ivar(obj);
7335 }
7336
7337 switch (BUILTIN_TYPE(obj)) {
7338 case T_FLOAT:
7339 case T_BIGNUM:
7340 case T_SYMBOL:
7341 /* Not immediates, but they have no references and no singleton
7342 * class */
7343 return;
7344
7345 case T_NIL:
7346 case T_FIXNUM:
7347 rb_bug("rb_gc_mark() called for broken object");
7348 break;
7349
7350 case T_NODE:
7351 UNEXPECTED_NODE(rb_gc_mark);
7352 break;
7353
7354 case T_IMEMO:
7355 gc_mark_imemo(objspace, obj);
7356 return;
7357
7358 default:
7359 break;
7360 }
7361
7362 gc_mark(objspace, any->as.basic.klass);
7363
7364 switch (BUILTIN_TYPE(obj)) {
7365 case T_CLASS:
7366 if (FL_TEST(obj, FL_SINGLETON)) {
7367 gc_mark(objspace, RCLASS_ATTACHED_OBJECT(obj));
7368 }
7369 // fall through to the handling shared by T_CLASS and T_MODULE
7370 case T_MODULE:
7371 if (RCLASS_SUPER(obj)) {
7372 gc_mark(objspace, RCLASS_SUPER(obj));
7373 }
7374
7375 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7376 mark_cvc_tbl(objspace, obj);
7377 cc_table_mark(objspace, obj);
7378 if (rb_shape_obj_too_complex(obj)) {
7379 mark_tbl_no_pin(objspace, (st_table *)RCLASS_IVPTR(obj));
7380 }
7381 else {
7382 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7383 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7384 }
7385 }
7386 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7387
7388 gc_mark(objspace, RCLASS_EXT(obj)->classpath);
7389 break;
7390
7391 case T_ICLASS:
7392 if (RICLASS_OWNS_M_TBL_P(obj)) {
7393 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7394 }
7395 if (RCLASS_SUPER(obj)) {
7396 gc_mark(objspace, RCLASS_SUPER(obj));
7397 }
7398
7399 if (RCLASS_INCLUDER(obj)) {
7400 gc_mark(objspace, RCLASS_INCLUDER(obj));
7401 }
7402 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7403 cc_table_mark(objspace, obj);
7404 break;
7405
7406 case T_ARRAY:
7407 if (ARY_SHARED_P(obj)) {
7408 VALUE root = ARY_SHARED_ROOT(obj);
7409 gc_mark(objspace, root);
7410 }
7411 else {
7412 long i, len = RARRAY_LEN(obj);
7413 const VALUE *ptr = RARRAY_CONST_PTR(obj);
7414 for (i=0; i < len; i++) {
7415 gc_mark(objspace, ptr[i]);
7416 }
7417 }
7418 break;
7419
7420 case T_HASH:
7421 mark_hash(objspace, obj);
7422 break;
7423
7424 case T_STRING:
7425 if (STR_SHARED_P(obj)) {
7426 if (STR_EMBED_P(any->as.string.as.heap.aux.shared)) {
7427 /* Embedded shared strings cannot be moved because this string
7428 * points into the slot of the shared string. There may be code
7429 * using the RSTRING_PTR on the stack, which would pin this
7430 * string but not pin the shared string, causing it to move. */
7431 gc_mark_and_pin(objspace, any->as.string.as.heap.aux.shared);
7432 }
7433 else {
7434 gc_mark(objspace, any->as.string.as.heap.aux.shared);
7435 }
7436 }
7437 break;
7438
7439 case T_DATA:
7440 {
7441 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
7442
7443 if (ptr) {
7444 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
7445 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
7446
7447 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
7448 rb_gc_mark_movable(*(VALUE *)((char *)ptr + offset));
7449 }
7450 }
7451 else {
7452 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
7453 any->as.typeddata.type->function.dmark :
7454 any->as.data.dmark;
7455 if (mark_func) (*mark_func)(ptr);
7456 }
7457 }
7458 }
7459 break;
7460
7461 case T_OBJECT:
7462 {
7463 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7464 if (rb_shape_obj_too_complex(obj)) {
7465 mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
7466 }
7467 else {
7468 const VALUE * const ptr = ROBJECT_IVPTR(obj);
7469
7470 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7471 for (i = 0; i < len; i++) {
7472 gc_mark(objspace, ptr[i]);
7473 }
7474 }
7475 if (shape) {
7476 VALUE klass = RBASIC_CLASS(obj);
7477
7478 // Increment max_iv_count if applicable, used to determine size pool allocation
7479 attr_index_t num_of_ivs = shape->next_iv_index;
7480 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7481 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7482 }
7483 }
7484 }
7485 break;
7486
7487 case T_FILE:
7488 if (any->as.file.fptr) {
7489 gc_mark(objspace, any->as.file.fptr->self);
7490 gc_mark(objspace, any->as.file.fptr->pathv);
7491 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
7492 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
7493 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
7494 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
7495 gc_mark(objspace, any->as.file.fptr->write_lock);
7496 gc_mark(objspace, any->as.file.fptr->timeout);
7497 }
7498 break;
7499
7500 case T_REGEXP:
7501 gc_mark(objspace, any->as.regexp.src);
7502 break;
7503
7504 case T_MATCH:
7505 gc_mark(objspace, any->as.match.regexp);
7506 if (any->as.match.str) {
7507 gc_mark(objspace, any->as.match.str);
7508 }
7509 break;
7510
7511 case T_RATIONAL:
7512 gc_mark(objspace, any->as.rational.num);
7513 gc_mark(objspace, any->as.rational.den);
7514 break;
7515
7516 case T_COMPLEX:
7517 gc_mark(objspace, any->as.complex.real);
7518 gc_mark(objspace, any->as.complex.imag);
7519 break;
7520
7521 case T_STRUCT:
7522 {
7523 long i;
7524 const long len = RSTRUCT_LEN(obj);
7525 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
7526
7527 for (i=0; i<len; i++) {
7528 gc_mark(objspace, ptr[i]);
7529 }
7530 }
7531 break;
7532
7533 default:
7534#if GC_DEBUG
7535 rb_gcdebug_print_obj_condition((VALUE)obj);
7536#endif
7537 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
7538 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
7539 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
7540 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7541 BUILTIN_TYPE(obj), (void *)any,
7542 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7543 }
7544}
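
/* -- Editorial sketch, not part of gc.c: when a TypedData sets
 * RUBY_TYPED_DECL_MARKING, the T_DATA branch of gc_mark_children() above does
 * not call dmark as a function; it reads it as an array of member offsets
 * terminated by RUBY_REF_END and marks each referenced VALUE as movable.  A
 * hand-built offset list could look like the following (the struct and names
 * are hypothetical; Ruby also ships helper macros for building such lists,
 * whose names vary by version): */
#if 0 /* illustrative example only */
#include "ruby.h"
#include <stddef.h>

struct pair { VALUE car; VALUE cdr; long tag; };

static const size_t pair_refs[] = {
    offsetof(struct pair, car),
    offsetof(struct pair, cdr),
    RUBY_REF_END
};

static const rb_data_type_t pair_type = {
    "pair",
    /* The offset list is stored in the dmark slot, which is what the
     * declarative-marking branch above expects; hence the cast. */
    { (RUBY_DATA_FUNC)pair_refs, RUBY_TYPED_DEFAULT_FREE, NULL },
    0, 0,
    RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_DECL_MARKING
};
#endif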
7545
7546/**
7547 * incremental: 0 -> not incremental (mark everything on the stack)
7548 * incremental: 1 -> mark at most `count' objects before returning
7549 */
7550static inline int
7551gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7552{
7553 mark_stack_t *mstack = &objspace->mark_stack;
7554 VALUE obj;
7555 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7556 size_t popped_count = 0;
7557
7558 while (pop_mark_stack(mstack, &obj)) {
7559 if (UNDEF_P(obj)) continue; /* skip */
7560
7561 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7562 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7563 }
7564 gc_mark_children(objspace, obj);
7565
7566 if (incremental) {
7567 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7568 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7569 }
7570 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7571 popped_count++;
7572
7573 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7574 break;
7575 }
7576 }
7577 else {
7578 /* just ignore marking bits */
7579 }
7580 }
7581
7582 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7583
7584 if (is_mark_stack_empty(mstack)) {
7585 shrink_stack_chunk_cache(mstack);
7586 return TRUE;
7587 }
7588 else {
7589 return FALSE;
7590 }
7591}
7592
7593static int
7594gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7595{
7596 return gc_mark_stacked_objects(objspace, TRUE, count);
7597}
7598
7599static int
7600gc_mark_stacked_objects_all(rb_objspace_t *objspace)
7601{
7602 return gc_mark_stacked_objects(objspace, FALSE, 0);
7603}
7604
7605#if PRINT_ROOT_TICKS
7606#define MAX_TICKS 0x100
7607static tick_t mark_ticks[MAX_TICKS];
7608static const char *mark_ticks_categories[MAX_TICKS];
7609
7610static void
7611show_mark_ticks(void)
7612{
7613 int i;
7614 fprintf(stderr, "mark ticks result:\n");
7615 for (i=0; i<MAX_TICKS; i++) {
7616 const char *category = mark_ticks_categories[i];
7617 if (category) {
7618 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7619 }
7620 else {
7621 break;
7622 }
7623 }
7624}
7625
7626#endif /* PRINT_ROOT_TICKS */
7627
7628static void
7629gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7630{
7631 struct gc_list *list;
7632 rb_execution_context_t *ec = GET_EC();
7633 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7634
7635#if PRINT_ROOT_TICKS
7636 tick_t start_tick = tick();
7637 int tick_count = 0;
7638 const char *prev_category = 0;
7639
7640 if (mark_ticks_categories[0] == 0) {
7641 atexit(show_mark_ticks);
7642 }
7643#endif
7644
7645 if (categoryp) *categoryp = "xxx";
7646
7647 objspace->rgengc.parent_object = Qfalse;
7648
7649#if PRINT_ROOT_TICKS
7650#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7651 if (prev_category) { \
7652 tick_t t = tick(); \
7653 mark_ticks[tick_count] = t - start_tick; \
7654 mark_ticks_categories[tick_count] = prev_category; \
7655 tick_count++; \
7656 } \
7657 prev_category = category; \
7658 start_tick = tick(); \
7659} while (0)
7660#else /* PRINT_ROOT_TICKS */
7661#define MARK_CHECKPOINT_PRINT_TICK(category)
7662#endif
7663
7664#define MARK_CHECKPOINT(category) do { \
7665 if (categoryp) *categoryp = category; \
7666 MARK_CHECKPOINT_PRINT_TICK(category); \
7667} while (0)
7668
7669 MARK_CHECKPOINT("vm");
7670 SET_STACK_END;
7671 rb_vm_mark(vm);
7672 if (vm->self) gc_mark(objspace, vm->self);
7673
7674 MARK_CHECKPOINT("finalizers");
7675 mark_finalizer_tbl(objspace, finalizer_table);
7676
7677 MARK_CHECKPOINT("machine_context");
7678 mark_current_machine_context(objspace, ec);
7679
7680 /* mark protected global variables */
7681 MARK_CHECKPOINT("global_list");
7682 for (list = global_list; list; list = list->next) {
7683 gc_mark_maybe(objspace, *list->varptr);
7684 }
7685
7686 MARK_CHECKPOINT("end_proc");
7687 rb_mark_end_proc();
7688
7689 MARK_CHECKPOINT("global_tbl");
7690 rb_gc_mark_global_tbl();
7691
7692 MARK_CHECKPOINT("object_id");
7693 rb_gc_mark(objspace->next_object_id);
7694 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
7695
7696 if (stress_to_class) rb_gc_mark(stress_to_class);
7697
7698 MARK_CHECKPOINT("finish");
7699#undef MARK_CHECKPOINT
7700}
7701
7702#if RGENGC_CHECK_MODE >= 4
7703
7704#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7705#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7706#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7707
7708struct reflist {
7709 VALUE *list;
7710 int pos;
7711 int size;
7712};
7713
7714static struct reflist *
7715reflist_create(VALUE obj)
7716{
7717 struct reflist *refs = xmalloc(sizeof(struct reflist));
7718 refs->size = 1;
7719 refs->list = ALLOC_N(VALUE, refs->size);
7720 refs->list[0] = obj;
7721 refs->pos = 1;
7722 return refs;
7723}
7724
7725static void
7726reflist_destruct(struct reflist *refs)
7727{
7728 xfree(refs->list);
7729 xfree(refs);
7730}
7731
7732static void
7733reflist_add(struct reflist *refs, VALUE obj)
7734{
7735 if (refs->pos == refs->size) {
7736 refs->size *= 2;
7737 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7738 }
7739
7740 refs->list[refs->pos++] = obj;
7741}
7742
7743static void
7744reflist_dump(struct reflist *refs)
7745{
7746 int i;
7747 for (i=0; i<refs->pos; i++) {
7748 VALUE obj = refs->list[i];
7749 if (IS_ROOTSIG(obj)) { /* root */
7750 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7751 }
7752 else {
7753 fprintf(stderr, "<%s>", obj_info(obj));
7754 }
7755 if (i+1 < refs->pos) fprintf(stderr, ", ");
7756 }
7757}
7758
7759static int
7760reflist_referred_from_machine_context(struct reflist *refs)
7761{
7762 int i;
7763 for (i=0; i<refs->pos; i++) {
7764 VALUE obj = refs->list[i];
7765 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7766 }
7767 return 0;
7768}
7769
7770struct allrefs {
7771 rb_objspace_t *objspace;
7772 /* a -> obj1
7773 * b -> obj1
7774 * c -> obj1
7775 * c -> obj2
7776 * d -> obj3
7777 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7778 */
7779 struct st_table *references;
7780 const char *category;
7781    VALUE root_obj;
7782    mark_stack_t mark_stack;
7783};
7784
7785static int
7786allrefs_add(struct allrefs *data, VALUE obj)
7787{
7788 struct reflist *refs;
7789 st_data_t r;
7790
7791 if (st_lookup(data->references, obj, &r)) {
7792 refs = (struct reflist *)r;
7793 reflist_add(refs, data->root_obj);
7794 return 0;
7795 }
7796 else {
7797 refs = reflist_create(data->root_obj);
7798 st_insert(data->references, obj, (st_data_t)refs);
7799 return 1;
7800 }
7801}
7802
7803static void
7804allrefs_i(VALUE obj, void *ptr)
7805{
7806 struct allrefs *data = (struct allrefs *)ptr;
7807
7808 if (allrefs_add(data, obj)) {
7809 push_mark_stack(&data->mark_stack, obj);
7810 }
7811}
7812
7813static void
7814allrefs_roots_i(VALUE obj, void *ptr)
7815{
7816 struct allrefs *data = (struct allrefs *)ptr;
7817 if (strlen(data->category) == 0) rb_bug("!!!");
7818 data->root_obj = MAKE_ROOTSIG(data->category);
7819
7820 if (allrefs_add(data, obj)) {
7821 push_mark_stack(&data->mark_stack, obj);
7822 }
7823}
7824#define PUSH_MARK_FUNC_DATA(v) do { \
7825 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7826 GET_RACTOR()->mfd = (v);
7827
7828#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7829
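/*
 * Build a table mapping every object reachable from the GC roots to a
 * reflist of its referrers (root categories are encoded via MAKE_ROOTSIG).
 * GC is disabled while the object graph is traversed. Only used by the
 * RGENGC_CHECK_MODE >= 4 consistency checks.
 */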
7830static st_table *
7831objspace_allrefs(rb_objspace_t *objspace)
7832{
7833 struct allrefs data;
7834 struct gc_mark_func_data_struct mfd;
7835 VALUE obj;
7836 int prev_dont_gc = dont_gc_val();
7837 dont_gc_on();
7838
7839 data.objspace = objspace;
7840 data.references = st_init_numtable();
7841 init_mark_stack(&data.mark_stack);
7842
7843 mfd.mark_func = allrefs_roots_i;
7844 mfd.data = &data;
7845
7846 /* traverse root objects */
7847 PUSH_MARK_FUNC_DATA(&mfd);
7848 GET_RACTOR()->mfd = &mfd;
7849 gc_mark_roots(objspace, &data.category);
7850 POP_MARK_FUNC_DATA();
7851
7852 /* traverse rest objects reachable from root objects */
7853 while (pop_mark_stack(&data.mark_stack, &obj)) {
7854 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7855 }
7856 free_stack_chunks(&data.mark_stack);
7857
7858 dont_gc_set(prev_dont_gc);
7859 return data.references;
7860}
7861
7862static int
7863objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7864{
7865 struct reflist *refs = (struct reflist *)value;
7866 reflist_destruct(refs);
7867 return ST_CONTINUE;
7868}
7869
7870static void
7871objspace_allrefs_destruct(struct st_table *refs)
7872{
7873 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7874 st_free_table(refs);
7875}
7876
7877#if RGENGC_CHECK_MODE >= 5
7878static int
7879allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7880{
7881 VALUE obj = (VALUE)k;
7882 struct reflist *refs = (struct reflist *)v;
7883 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7884 reflist_dump(refs);
7885 fprintf(stderr, "\n");
7886 return ST_CONTINUE;
7887}
7888
7889static void
7890allrefs_dump(rb_objspace_t *objspace)
7891{
7892 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7893 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7894 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7895}
7896#endif
7897
7898static int
7899gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7900{
7901 VALUE obj = k;
7902 struct reflist *refs = (struct reflist *)v;
7903 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
7904
7905 /* object should be marked or oldgen */
7906 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7907 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7908 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7909 reflist_dump(refs);
7910
7911 if (reflist_referred_from_machine_context(refs)) {
7912 fprintf(stderr, " (marked from machine stack).\n");
7913            /* a mark from the machine context can be a false positive */
7914 }
7915 else {
7916 objspace->rgengc.error_count++;
7917 fprintf(stderr, "\n");
7918 }
7919 }
7920 return ST_CONTINUE;
7921}
7922
7923static void
7924gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7925{
7926 size_t saved_malloc_increase = objspace->malloc_params.increase;
7927#if RGENGC_ESTIMATE_OLDMALLOC
7928 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7929#endif
7930 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7931
7932 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7933
7934 if (checker_func) {
7935 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7936 }
7937
7938 if (objspace->rgengc.error_count > 0) {
7939#if RGENGC_CHECK_MODE >= 5
7940 allrefs_dump(objspace);
7941#endif
7942 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7943 }
7944
7945 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7946 objspace->rgengc.allrefs_table = 0;
7947
7948 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7949 objspace->malloc_params.increase = saved_malloc_increase;
7950#if RGENGC_ESTIMATE_OLDMALLOC
7951 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7952#endif
7953}
7954#endif /* RGENGC_CHECK_MODE >= 4 */
7955
7956struct verify_internal_consistency_struct {
7957 rb_objspace_t *objspace;
7958 int err_count;
7959 size_t live_object_count;
7960 size_t zombie_object_count;
7961
7962 VALUE parent;
7963 size_t old_object_count;
7964 size_t remembered_shady_count;
7965};
7966
7967static void
7968check_generation_i(const VALUE child, void *ptr)
7969{
7970    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7971 const VALUE parent = data->parent;
7972
7973 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7974
7975 if (!RVALUE_OLD_P(child)) {
7976 if (!RVALUE_REMEMBERED(parent) &&
7977 !RVALUE_REMEMBERED(child) &&
7978 !RVALUE_UNCOLLECTIBLE(child)) {
7979 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7980 data->err_count++;
7981 }
7982 }
7983}
7984
7985static void
7986check_color_i(const VALUE child, void *ptr)
7987{
7988    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7989 const VALUE parent = data->parent;
7990
7991 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7992 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7993 obj_info(parent), obj_info(child));
7994 data->err_count++;
7995 }
7996}
7997
7998static void
7999check_children_i(const VALUE child, void *ptr)
8000{
8001    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
8002 if (check_rvalue_consistency_force(child, FALSE) != 0) {
8003 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
8004 obj_info(child), obj_info(data->parent));
8005 rb_print_backtrace(stderr); /* C backtrace will help to debug */
8006
8007 data->err_count++;
8008 }
8009}
8010
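/*
 * Walk every slot in [page_start, page_end) with the given stride. Live
 * objects are counted and their outgoing references are verified with
 * check_children_i, check_generation_i (an old parent pointing at a young
 * child must be remembered) and, during incremental marking, check_color_i
 * (a black parent must not point at a white child). Zombies are counted
 * separately.
 */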
8011static int
8012verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
8013                              struct verify_internal_consistency_struct *data)
8014{
8015 VALUE obj;
8016 rb_objspace_t *objspace = data->objspace;
8017
8018 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
8019 void *poisoned = asan_unpoison_object_temporary(obj);
8020
8021 if (is_live_object(objspace, obj)) {
8022 /* count objects */
8023 data->live_object_count++;
8024 data->parent = obj;
8025
8026            /* Normally, we don't expect T_MOVED objects to be in the heap.
8027             * But they can stay alive on the stack, so skip their children in that case. */
8028 if (!gc_object_moved_p(objspace, obj)) {
8029 /* moved slots don't have children */
8030 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
8031 }
8032
8033 /* check health of children */
8034 if (RVALUE_OLD_P(obj)) data->old_object_count++;
8035 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
8036
8037 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
8038                /* objects reachable from an oldgen object should be old or (young and remembered) */
8039 data->parent = obj;
8040 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
8041 }
8042
8043 if (is_incremental_marking(objspace)) {
8044 if (RVALUE_BLACK_P(obj)) {
8045                    /* objects reachable from a black object should be black or grey */
8046 data->parent = obj;
8047 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
8048 }
8049 }
8050 }
8051 else {
8052 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
8053 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
8054 data->zombie_object_count++;
8055 }
8056 }
8057 if (poisoned) {
8058 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
8059 asan_poison_object(obj);
8060 }
8061 }
8062
8063 return 0;
8064}
8065
8066static int
8067gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
8068{
8069 unsigned int has_remembered_shady = FALSE;
8070 unsigned int has_remembered_old = FALSE;
8071 int remembered_old_objects = 0;
8072 int free_objects = 0;
8073 int zombie_objects = 0;
8074
8075 short slot_size = page->slot_size;
8076 uintptr_t start = (uintptr_t)page->start;
8077 uintptr_t end = start + page->total_slots * slot_size;
8078
8079 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8080 VALUE val = (VALUE)ptr;
8081 void *poisoned = asan_unpoison_object_temporary(val);
8082 enum ruby_value_type type = BUILTIN_TYPE(val);
8083
8084 if (type == T_NONE) free_objects++;
8085 if (type == T_ZOMBIE) zombie_objects++;
8086 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
8087 has_remembered_shady = TRUE;
8088 }
8089 if (RVALUE_PAGE_MARKING(page, val)) {
8090 has_remembered_old = TRUE;
8091 remembered_old_objects++;
8092 }
8093
8094 if (poisoned) {
8095 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
8096 asan_poison_object(val);
8097 }
8098 }
8099
8100 if (!is_incremental_marking(objspace) &&
8101 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
8102
8103 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8104 VALUE val = (VALUE)ptr;
8105 if (RVALUE_PAGE_MARKING(page, val)) {
8106 fprintf(stderr, "marking -> %s\n", obj_info(val));
8107 }
8108 }
8109 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
8110 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
8111 }
8112
8113 if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
8114 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
8115 (void *)page, obj ? obj_info(obj) : "");
8116 }
8117
8118 if (0) {
8119        /* free_slots may not be equal to free_objects */
8120 if (page->free_slots != free_objects) {
8121 rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
8122 }
8123 }
8124 if (page->final_slots != zombie_objects) {
8125 rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
8126 }
8127
8128 return remembered_old_objects;
8129}
8130
8131static int
8132gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
8133{
8134 int remembered_old_objects = 0;
8135 struct heap_page *page = 0;
8136
8137 ccan_list_for_each(head, page, page_node) {
8138 asan_unlock_freelist(page);
8139 RVALUE *p = page->freelist;
8140 while (p) {
8141 VALUE vp = (VALUE)p;
8142 VALUE prev = vp;
8143 asan_unpoison_object(vp, false);
8144 if (BUILTIN_TYPE(vp) != T_NONE) {
8145 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8146 }
8147 p = p->as.free.next;
8148 asan_poison_object(prev);
8149 }
8150 asan_lock_freelist(page);
8151
8152 if (page->flags.has_remembered_objects == FALSE) {
8153 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8154 }
8155 }
8156
8157 return remembered_old_objects;
8158}
8159
8160static int
8161gc_verify_heap_pages(rb_objspace_t *objspace)
8162{
8163 int remembered_old_objects = 0;
8164 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8165 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8166 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8167 }
8168 return remembered_old_objects;
8169}
8170
8171/*
8172 * call-seq:
8173 * GC.verify_internal_consistency -> nil
8174 *
8175 * Verify internal consistency.
8176 *
8177 * This method is implementation specific.
8178 * Currently it checks generational consistency
8179 * when RGenGC is supported.
8180 */
8181static VALUE
8182gc_verify_internal_consistency_m(VALUE dummy)
8183{
8184 gc_verify_internal_consistency(&rb_objspace);
8185 return Qnil;
8186}
8187
8188static void
8189gc_verify_internal_consistency_(rb_objspace_t *objspace)
8190{
8191 struct verify_internal_consistency_struct data = {0};
8192
8193 data.objspace = objspace;
8194 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8195
8196 /* check relations */
8197 for (size_t i = 0; i < heap_allocated_pages; i++) {
8198 struct heap_page *page = heap_pages_sorted[i];
8199 short slot_size = page->slot_size;
8200
8201 uintptr_t start = (uintptr_t)page->start;
8202 uintptr_t end = start + page->total_slots * slot_size;
8203
8204 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8205 }
8206
8207 if (data.err_count != 0) {
8208#if RGENGC_CHECK_MODE >= 5
8209 objspace->rgengc.error_count = data.err_count;
8210 gc_marks_check(objspace, NULL, NULL);
8211 allrefs_dump(objspace);
8212#endif
8213 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8214 }
8215
8216 /* check heap_page status */
8217 gc_verify_heap_pages(objspace);
8218
8219 /* check counters */
8220
8221 if (!is_lazy_sweeping(objspace) &&
8222 !finalizing &&
8223 ruby_single_main_ractor != NULL) {
8224 if (objspace_live_slots(objspace) != data.live_object_count) {
8225 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
8226 heap_pages_final_slots, total_freed_objects(objspace));
8227 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8228 objspace_live_slots(objspace), data.live_object_count);
8229 }
8230 }
8231
8232 if (!is_marking(objspace)) {
8233 if (objspace->rgengc.old_objects != data.old_object_count) {
8234 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8235 objspace->rgengc.old_objects, data.old_object_count);
8236 }
8237 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8238 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8239 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8240 }
8241 }
8242
8243 if (!finalizing) {
8244 size_t list_count = 0;
8245
8246 {
8247 VALUE z = heap_pages_deferred_final;
8248 while (z) {
8249 list_count++;
8250 z = RZOMBIE(z)->next;
8251 }
8252 }
8253
8254 if (heap_pages_final_slots != data.zombie_object_count ||
8255 heap_pages_final_slots != list_count) {
8256
8257 rb_bug("inconsistent finalizing object count:\n"
8258 " expect %"PRIuSIZE"\n"
8259 " but %"PRIuSIZE" zombies\n"
8260 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8261 heap_pages_final_slots,
8262 data.zombie_object_count,
8263 list_count);
8264 }
8265 }
8266
8267 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8268}
8269
8270static void
8271gc_verify_internal_consistency(rb_objspace_t *objspace)
8272{
8273 RB_VM_LOCK_ENTER();
8274 {
8275 rb_vm_barrier(); // stop other ractors
8276
8277 unsigned int prev_during_gc = during_gc;
8278 during_gc = FALSE; // stop gc here
8279 {
8280 gc_verify_internal_consistency_(objspace);
8281 }
8282 during_gc = prev_during_gc;
8283 }
8284 RB_VM_LOCK_LEAVE();
8285}
8286
8287void
8288rb_gc_verify_internal_consistency(void)
8289{
8290 gc_verify_internal_consistency(&rb_objspace);
8291}
8292
8293static void
8294heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8295{
8296 if (heap->pooled_pages) {
8297 if (heap->free_pages) {
8298 struct heap_page *free_pages_tail = heap->free_pages;
8299 while (free_pages_tail->free_next) {
8300 free_pages_tail = free_pages_tail->free_next;
8301 }
8302 free_pages_tail->free_next = heap->pooled_pages;
8303 }
8304 else {
8305 heap->free_pages = heap->pooled_pages;
8306 }
8307
8308 heap->pooled_pages = NULL;
8309 }
8310}
8311
8312/* marks */
8313
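/*
 * Enter the marking phase. For a full (major) GC: compute the incremental
 * step size, reset the old-object and WB-unprotected counters, clear every
 * eden page's mark and remember bitmaps and return pooled pages to the
 * free list. For a minor GC: only re-mark the remembered set. Finally mark
 * the root objects.
 */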
8314static void
8315gc_marks_start(rb_objspace_t *objspace, int full_mark)
8316{
8317 /* start marking */
8318 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8319 gc_mode_transition(objspace, gc_mode_marking);
8320
8321 if (full_mark) {
8322 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8323 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8324
8325 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8326 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
8327 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8328 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8329 objspace->flags.during_minor_gc = FALSE;
8330 if (ruby_enable_autocompact) {
8331 objspace->flags.during_compacting |= TRUE;
8332 }
8333 objspace->profile.major_gc_count++;
8334 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8335 objspace->rgengc.old_objects = 0;
8336 objspace->rgengc.last_major_gc = objspace->profile.count;
8337 objspace->marked_slots = 0;
8338
8339 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8340 rb_size_pool_t *size_pool = &size_pools[i];
8341 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8342 rgengc_mark_and_rememberset_clear(objspace, heap);
8343 heap_move_pooled_pages_to_free_pages(heap);
8344
8345 if (objspace->flags.during_compacting) {
8346 struct heap_page *page = NULL;
8347
8348 ccan_list_for_each(&heap->pages, page, page_node) {
8349 page->pinned_slots = 0;
8350 }
8351 }
8352 }
8353 }
8354 else {
8355 objspace->flags.during_minor_gc = TRUE;
8356 objspace->marked_slots =
8357 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
8358 objspace->profile.minor_gc_count++;
8359
8360 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8361 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8362 }
8363 }
8364
8365 gc_mark_roots(objspace, NULL);
8366
8367 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8368 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8369}
8370
8371static inline void
8372gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8373{
8374 if (bits) {
8375 do {
8376 if (bits & 1) {
8377 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8378 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8379 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8380 gc_mark_children(objspace, (VALUE)p);
8381 }
8382 p += BASE_SLOT_SIZE;
8383 bits >>= 1;
8384 } while (bits);
8385 }
8386}
8387
8388static void
8389gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
8390{
8391 struct heap_page *page = 0;
8392
8393 ccan_list_for_each(&heap->pages, page, page_node) {
8394 bits_t *mark_bits = page->mark_bits;
8395 bits_t *wbun_bits = page->wb_unprotected_bits;
8396 uintptr_t p = page->start;
8397 size_t j;
8398
8399 bits_t bits = mark_bits[0] & wbun_bits[0];
8400 bits >>= NUM_IN_PAGE(p);
8401 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8402 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8403
8404 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8405 bits_t bits = mark_bits[j] & wbun_bits[j];
8406
8407 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8408 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8409 }
8410 }
8411
8412 gc_mark_stacked_objects_all(objspace);
8413}
8414
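/*
 * After marking, scan every registered weak reference: pointers to objects
 * that were not marked are overwritten with Qundef, survivors are counted,
 * and the weak reference list is reset for the next GC cycle.
 */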
8415static void
8416gc_update_weak_references(rb_objspace_t *objspace)
8417{
8418 size_t retained_weak_references_count = 0;
8419 VALUE **ptr_ptr;
8420 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
8421 if (!*ptr_ptr) continue;
8422
8423 VALUE obj = **ptr_ptr;
8424
8425 if (RB_SPECIAL_CONST_P(obj)) continue;
8426
8427 if (!RVALUE_MARKED(obj)) {
8428 **ptr_ptr = Qundef;
8429 }
8430 else {
8431 retained_weak_references_count++;
8432 }
8433 }
8434
8435 objspace->profile.retained_weak_references_count = retained_weak_references_count;
8436
8437 rb_darray_clear(objspace->weak_references);
8438 rb_darray_resize_capa_without_gc(&objspace->weak_references, retained_weak_references_count);
8439}
8440
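/*
 * Finish the marking phase. For incremental marking this re-marks the
 * roots, drains the mark stack and traverses marked WB-unprotected
 * objects. Weak references are then updated, and heuristics based on the
 * free-slot ratios decide how many pages become freeable and whether the
 * next GC must be a major one (GPR_FLAG_MAJOR_BY_NOFREE/SHADY/OLDGEN).
 */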
8441static void
8442gc_marks_finish(rb_objspace_t *objspace)
8443{
8444 /* finish incremental GC */
8445 if (is_incremental_marking(objspace)) {
8446 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8447 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8448 mark_stack_size(&objspace->mark_stack));
8449 }
8450
8451 gc_mark_roots(objspace, 0);
8452 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8453
8454#if RGENGC_CHECK_MODE >= 2
8455 if (gc_verify_heap_pages(objspace) != 0) {
8456 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8457 }
8458#endif
8459
8460 objspace->flags.during_incremental_marking = FALSE;
8461 /* check children of all marked wb-unprotected objects */
8462 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8463 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8464 }
8465 }
8466
8467 gc_update_weak_references(objspace);
8468
8469#if RGENGC_CHECK_MODE >= 2
8470 gc_verify_internal_consistency(objspace);
8471#endif
8472
8473#if RGENGC_CHECK_MODE >= 4
8474 during_gc = FALSE;
8475 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8476 during_gc = TRUE;
8477#endif
8478
8479 {
8480 /* decide full GC is needed or not */
8481 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8482 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
8483 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8484 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8485 int full_marking = is_full_marking(objspace);
8486 const int r_cnt = GET_VM()->ractor.cnt;
8487        const int r_mul = r_cnt > 8 ? 8 : r_cnt; // up to 8
8488
8489 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8490
8491 /* Setup freeable slots. */
8492 size_t total_init_slots = 0;
8493 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8494 total_init_slots += gc_params.size_pool_init_slots[i] * r_mul;
8495 }
8496
8497 if (max_free_slots < total_init_slots) {
8498 max_free_slots = total_init_slots;
8499 }
8500
8501 if (sweep_slots > max_free_slots) {
8502 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8503 }
8504 else {
8505 heap_pages_freeable_pages = 0;
8506 }
8507
8508 /* check free_min */
8509 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8510 min_free_slots = gc_params.heap_free_slots * r_mul;
8511 }
8512
8513 if (sweep_slots < min_free_slots) {
8514 if (!full_marking) {
8515 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8516 full_marking = TRUE;
8517 /* do not update last_major_gc, because full marking is not done. */
8518 /* goto increment; */
8519 }
8520 else {
8521                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
8522 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8523 }
8524 }
8525 }
8526
8527 if (full_marking) {
8528 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8529 const double r = gc_params.oldobject_limit_factor;
8530 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
8531 (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
8532 (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
8533 );
8534 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8535 }
8536
8537 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8538 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8539 }
8540 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8541 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8542 }
8543 if (RGENGC_FORCE_MAJOR_GC) {
8544 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8545 }
8546
8547 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8548 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8549 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8550 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8551 objspace->rgengc.need_major_gc ? "major" : "minor");
8552 }
8553
8554 rb_ractor_finish_marking();
8555
8556 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
8557}
8558
8559static bool
8560gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8561{
8562 return heap->sweeping_page == heap->compact_cursor;
8563}
8564
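/*
 * Choose the size pool an object should be moved into during compaction.
 * For T_ARRAY, T_OBJECT, T_STRING and T_HASH the slot size the object
 * actually needs is recomputed so it can be moved to a better-fitting pool
 * (too-complex T_OBJECTs go to the smallest pool); any other type stays in
 * its current pool.
 */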
8565static rb_size_pool_t *
8566gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, VALUE src)
8567{
8568 size_t obj_size;
8569 size_t idx = 0;
8570
8571 switch (BUILTIN_TYPE(src)) {
8572 case T_ARRAY:
8573 obj_size = rb_ary_size_as_embedded(src);
8574 break;
8575
8576 case T_OBJECT:
8577 if (rb_shape_obj_too_complex(src)) {
8578 return &size_pools[0];
8579 }
8580 else {
8581 obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
8582 }
8583 break;
8584
8585 case T_STRING:
8586 obj_size = rb_str_size_as_embedded(src);
8587 break;
8588
8589 case T_HASH:
8590 obj_size = sizeof(struct RHash) + (RHASH_ST_TABLE_P(src) ? sizeof(st_table) : sizeof(ar_table));
8591 break;
8592
8593 default:
8594 return src_pool;
8595 }
8596
8597    if (rb_gc_size_allocatable_p(obj_size)) {
8598 idx = size_pool_idx_for_size(obj_size);
8599 }
8600 return &size_pools[idx];
8601}
8602
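/*
 * Move a single object into the eden heap of its destination size pool,
 * sweeping destination pages on demand until a free slot is found. When a
 * T_OBJECT changes size pool, its shape is re-rooted under the destination
 * pool's initial shape. Returns false when compaction of the current heap
 * must stop because its sweep and compact cursors have met.
 */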
8603static bool
8604gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_pool, VALUE src)
8605{
8606 GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
8607 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8608
8609 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8610 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8611 rb_shape_t *new_shape = NULL;
8612 rb_shape_t *orig_shape = NULL;
8613
8614 if (gc_compact_heap_cursors_met_p(dheap)) {
8615 return dheap != heap;
8616 }
8617
8618 if (RB_TYPE_P(src, T_OBJECT)) {
8619 orig_shape = rb_shape_get_shape(src);
8620 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8621 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8622 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8623
8624 if (!new_shape) {
8625 dest_pool = size_pool;
8626 dheap = heap;
8627 }
8628 }
8629 }
8630
8631 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8632 struct gc_sweep_context ctx = {
8633 .page = dheap->sweeping_page,
8634 .final_slots = 0,
8635 .freed_slots = 0,
8636 .empty_slots = 0,
8637 };
8638
8639 /* The page of src could be partially compacted, so it may contain
8640 * T_MOVED. Sweeping a page may read objects on this page, so we
8641 * need to lock the page. */
8642 lock_page_body(objspace, GET_PAGE_BODY(src));
8643 gc_sweep_page(objspace, dheap, &ctx);
8644 unlock_page_body(objspace, GET_PAGE_BODY(src));
8645
8646 if (dheap->sweeping_page->free_slots > 0) {
8647 heap_add_freepage(dheap, dheap->sweeping_page);
8648 }
8649
8650 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8651 if (gc_compact_heap_cursors_met_p(dheap)) {
8652 return dheap != heap;
8653 }
8654 }
8655
8656 if (orig_shape) {
8657 if (new_shape) {
8658 VALUE dest = rb_gc_location(src);
8659 rb_shape_set_shape(dest, new_shape);
8660 }
8661 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8662 }
8663
8664 return true;
8665}
8666
8667static bool
8668gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
8669{
8670 short slot_size = page->slot_size;
8671 short slot_bits = slot_size / BASE_SLOT_SIZE;
8672 GC_ASSERT(slot_bits > 0);
8673
8674 do {
8675 VALUE vp = (VALUE)p;
8676 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8677
8678 if (bitset & 1) {
8679 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8680
8681 if (gc_is_moveable_obj(objspace, vp)) {
8682 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8683                    // the cursors met; bubble up to the caller
8684 return false;
8685 }
8686 }
8687 }
8688 p += slot_size;
8689 bitset >>= slot_bits;
8690 } while (bitset);
8691
8692 return true;
8693}
8694
8695// Iterate over all the objects in the page, moving them to where they want to go
8696static bool
8697gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
8698{
8699 GC_ASSERT(page == heap->compact_cursor);
8700
8701 bits_t *mark_bits, *pin_bits;
8702 bits_t bitset;
8703 uintptr_t p = page->start;
8704
8705 mark_bits = page->mark_bits;
8706 pin_bits = page->pinned_bits;
8707
8708 // objects that can be moved are marked and not pinned
8709 bitset = (mark_bits[0] & ~pin_bits[0]);
8710 bitset >>= NUM_IN_PAGE(p);
8711 if (bitset) {
8712 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8713 return false;
8714 }
8715 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8716
8717 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8718 bitset = (mark_bits[j] & ~pin_bits[j]);
8719 if (bitset) {
8720 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8721 return false;
8722 }
8723 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8724 }
8725
8726 return true;
8727}
8728
8729static bool
8730gc_compact_all_compacted_p(rb_objspace_t *objspace)
8731{
8732 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8733 rb_size_pool_t *size_pool = &size_pools[i];
8734 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8735
8736 if (heap->total_pages > 0 &&
8737 !gc_compact_heap_cursors_met_p(heap)) {
8738 return false;
8739 }
8740 }
8741
8742 return true;
8743}
8744
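/*
 * Compaction driver run from sweeping. Each size pool's compact_cursor
 * starts at the last eden page; the loop keeps moving objects off the
 * cursor page, locking each fully processed page with lock_page_body(),
 * until the compact cursor meets the sweep cursor in every pool, and then
 * finishes compaction.
 */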
8745static void
8746gc_sweep_compact(rb_objspace_t *objspace)
8747{
8748 gc_compact_start(objspace);
8749#if RGENGC_CHECK_MODE >= 2
8750 gc_verify_internal_consistency(objspace);
8751#endif
8752
8753 while (!gc_compact_all_compacted_p(objspace)) {
8754 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8755 rb_size_pool_t *size_pool = &size_pools[i];
8756 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8757
8758 if (gc_compact_heap_cursors_met_p(heap)) {
8759 continue;
8760 }
8761
8762 struct heap_page *start_page = heap->compact_cursor;
8763
8764 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8765 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8766
8767 continue;
8768 }
8769
8770 // If we get here, we've finished moving all objects on the compact_cursor page
8771 // So we can lock it and move the cursor on to the next one.
8772 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8773 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8774 }
8775 }
8776
8777 gc_compact_finish(objspace);
8778
8779#if RGENGC_CHECK_MODE >= 2
8780 gc_verify_internal_consistency(objspace);
8781#endif
8782}
8783
8784static void
8785gc_marks_rest(rb_objspace_t *objspace)
8786{
8787 gc_report(1, objspace, "gc_marks_rest\n");
8788
8789 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8790 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8791 }
8792
8793 if (is_incremental_marking(objspace)) {
8794 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8795 }
8796 else {
8797 gc_mark_stacked_objects_all(objspace);
8798 }
8799
8800 gc_marks_finish(objspace);
8801}
8802
8803static bool
8804gc_marks_step(rb_objspace_t *objspace, size_t slots)
8805{
8806 bool marking_finished = false;
8807
8808 GC_ASSERT(is_marking(objspace));
8809 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8810 gc_marks_finish(objspace);
8811
8812 marking_finished = true;
8813 }
8814
8815 return marking_finished;
8816}
8817
8818static bool
8819gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8820{
8821 GC_ASSERT(dont_gc_val() == FALSE);
8822 bool marking_finished = true;
8823
8824 gc_marking_enter(objspace);
8825
8826 if (heap->free_pages) {
8827 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8828
8829 marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
8830 }
8831 else {
8832 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8833 mark_stack_size(&objspace->mark_stack));
8834 size_pool->force_incremental_marking_finish_count++;
8835 gc_marks_rest(objspace);
8836 }
8837
8838 gc_marking_exit(objspace);
8839
8840 return marking_finished;
8841}
8842
8843static bool
8844gc_marks(rb_objspace_t *objspace, int full_mark)
8845{
8846 gc_prof_mark_timer_start(objspace);
8847 gc_marking_enter(objspace);
8848
8849 bool marking_finished = false;
8850
8851 /* setup marking */
8852
8853 gc_marks_start(objspace, full_mark);
8854 if (!is_incremental_marking(objspace)) {
8855 gc_marks_rest(objspace);
8856 marking_finished = true;
8857 }
8858
8859#if RGENGC_PROFILE > 0
8860 if (gc_prof_record(objspace)) {
8861 gc_profile_record *record = gc_prof_record(objspace);
8862 record->old_objects = objspace->rgengc.old_objects;
8863 }
8864#endif
8865
8866 gc_marking_exit(objspace);
8867 gc_prof_mark_timer_stop(objspace);
8868
8869 return marking_finished;
8870}
8871
8872/* RGENGC */
8873
8874static void
8875gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8876{
8877 if (level <= RGENGC_DEBUG) {
8878 char buf[1024];
8879 FILE *out = stderr;
8880 va_list args;
8881 const char *status = " ";
8882
8883 if (during_gc) {
8884 status = is_full_marking(objspace) ? "+" : "-";
8885 }
8886 else {
8887 if (is_lazy_sweeping(objspace)) {
8888 status = "S";
8889 }
8890 if (is_incremental_marking(objspace)) {
8891 status = "M";
8892 }
8893 }
8894
8895 va_start(args, fmt);
8896 vsnprintf(buf, 1024, fmt, args);
8897 va_end(args);
8898
8899 fprintf(out, "%s|", status);
8900 fputs(buf, out);
8901 }
8902}
8903
8904/* bit operations */
8905
8906static int
8907rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
8908{
8909 struct heap_page *page = GET_HEAP_PAGE(obj);
8910 bits_t *bits = &page->remembered_bits[0];
8911
8912 if (MARKED_IN_BITMAP(bits, obj)) {
8913 return FALSE;
8914 }
8915 else {
8916 page->flags.has_remembered_objects = TRUE;
8917 MARK_IN_BITMAP(bits, obj);
8918 return TRUE;
8919 }
8920}
8921
8922/* wb, etc */
8923
8924/* return FALSE if already remembered */
8925static int
8926rgengc_remember(rb_objspace_t *objspace, VALUE obj)
8927{
8928 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8929 RVALUE_REMEMBERED(obj) ? "was already remembered" : "is remembered now");
8930
8931 check_rvalue_consistency(obj);
8932
8933 if (RGENGC_CHECK_MODE) {
8934 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8935 }
8936
8937#if RGENGC_PROFILE > 0
8938 if (!RVALUE_REMEMBERED(obj)) {
8939 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8940 objspace->profile.total_remembered_normal_object_count++;
8941#if RGENGC_PROFILE >= 2
8942 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8943#endif
8944 }
8945 }
8946#endif /* RGENGC_PROFILE > 0 */
8947
8948 return rgengc_remembersetbits_set(objspace, obj);
8949}
8950
8951#ifndef PROFILE_REMEMBERSET_MARK
8952#define PROFILE_REMEMBERSET_MARK 0
8953#endif
8954
8955static inline void
8956rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8957{
8958 if (bitset) {
8959 do {
8960 if (bitset & 1) {
8961 VALUE obj = (VALUE)p;
8962 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8963 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8964 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8965
8966 gc_mark_children(objspace, obj);
8967 }
8968 p += BASE_SLOT_SIZE;
8969 bitset >>= 1;
8970 } while (bitset);
8971 }
8972}
8973
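/*
 * Mark the remembered set for a minor GC: for each page that may hold
 * remembered or uncollectible WB-unprotected objects, OR the corresponding
 * bitmaps together, clear the remembered bits, and mark the children of
 * every object found.
 */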
8974static void
8975rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
8976{
8977 size_t j;
8978 struct heap_page *page = 0;
8979#if PROFILE_REMEMBERSET_MARK
8980 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8981#endif
8982 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8983
8984 ccan_list_for_each(&heap->pages, page, page_node) {
8985 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
8986 uintptr_t p = page->start;
8987 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8988 bits_t *remembered_bits = page->remembered_bits;
8989 bits_t *uncollectible_bits = page->uncollectible_bits;
8990 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8991#if PROFILE_REMEMBERSET_MARK
8992 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
8993 else if (page->flags.has_remembered_objects) has_old++;
8994 else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
8995#endif
8996 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8997 bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8998 remembered_bits[j] = 0;
8999 }
9000 page->flags.has_remembered_objects = FALSE;
9001
9002 bitset = bits[0];
9003 bitset >>= NUM_IN_PAGE(p);
9004 rgengc_rememberset_mark_plane(objspace, p, bitset);
9005 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
9006
9007 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
9008 bitset = bits[j];
9009 rgengc_rememberset_mark_plane(objspace, p, bitset);
9010 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
9011 }
9012 }
9013#if PROFILE_REMEMBERSET_MARK
9014 else {
9015 skip++;
9016 }
9017#endif
9018 }
9019
9020#if PROFILE_REMEMBERSET_MARK
9021 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
9022#endif
9023 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
9024}
9025
9026static void
9027rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
9028{
9029 struct heap_page *page = 0;
9030
9031 ccan_list_for_each(&heap->pages, page, page_node) {
9032 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9033 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9034 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9035 memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9036 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9037 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
9038 page->flags.has_remembered_objects = FALSE;
9039 }
9040}
9041
9042/* RGENGC: APIs */
9043
9044NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
9045
9046static void
9047gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
9048{
9049 if (RGENGC_CHECK_MODE) {
9050 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
9051 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
9052 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
9053 }
9054
9055 /* mark `a' and remember (default behavior) */
9056 if (!RVALUE_REMEMBERED(a)) {
9057 RB_VM_LOCK_ENTER_NO_BARRIER();
9058 {
9059 rgengc_remember(objspace, a);
9060 }
9061 RB_VM_LOCK_LEAVE_NO_BARRIER();
9062 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
9063 }
9064
9065 check_rvalue_consistency(a);
9066 check_rvalue_consistency(b);
9067}
9068
9069static void
9070gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
9071{
9072 gc_mark_set_parent(objspace, parent);
9073 rgengc_check_relation(objspace, obj);
9074 if (gc_mark_set(objspace, obj) == FALSE) return;
9075 gc_aging(objspace, obj);
9076 gc_grey(objspace, obj);
9077}
9078
9079NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
9080
9081static void
9082gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
9083{
9084 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
9085
9086 if (RVALUE_BLACK_P(a)) {
9087 if (RVALUE_WHITE_P(b)) {
9088 if (!RVALUE_WB_UNPROTECTED(a)) {
9089 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
9090 gc_mark_from(objspace, b, a);
9091 }
9092 }
9093 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
9094 rgengc_remember(objspace, a);
9095 }
9096
9097 if (UNLIKELY(objspace->flags.during_compacting)) {
9098 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9099 }
9100 }
9101}
9102
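/*
 * Write barrier invoked when `b` is stored into `a` (e.g. via RB_OBJ_WRITE).
 * Outside of incremental marking, the generational barrier remembers `a`
 * when an old object starts referring to a young one. During incremental
 * marking, the slow path takes the VM lock and greys `b` when a black `a`
 * points at a white `b`, or remembers `a`, so no reachable object is left
 * white.
 */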
9103void
9104rb_gc_writebarrier(VALUE a, VALUE b)
9105{
9106 rb_objspace_t *objspace = &rb_objspace;
9107
9108 if (RGENGC_CHECK_MODE) {
9109 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
9110 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
9111 }
9112
9113 retry:
9114 if (!is_incremental_marking(objspace)) {
9115 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9116 // do nothing
9117 }
9118 else {
9119 gc_writebarrier_generational(a, b, objspace);
9120 }
9121 }
9122 else {
9123 bool retry = false;
9124 /* slow path */
9125 RB_VM_LOCK_ENTER_NO_BARRIER();
9126 {
9127 if (is_incremental_marking(objspace)) {
9128 gc_writebarrier_incremental(a, b, objspace);
9129 }
9130 else {
9131 retry = true;
9132 }
9133 }
9134 RB_VM_LOCK_LEAVE_NO_BARRIER();
9135
9136 if (retry) goto retry;
9137 }
9138 return;
9139}
9140
9141void
9142rb_gc_writebarrier_unprotect(VALUE obj)
9143{
9144 if (RVALUE_WB_UNPROTECTED(obj)) {
9145 return;
9146 }
9147 else {
9148 rb_objspace_t *objspace = &rb_objspace;
9149
9150 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9151 RVALUE_REMEMBERED(obj) ? " (already remembered)" : "");
9152
9153 RB_VM_LOCK_ENTER_NO_BARRIER();
9154 {
9155 if (RVALUE_OLD_P(obj)) {
9156 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9157 RVALUE_DEMOTE(objspace, obj);
9158 gc_mark_set(objspace, obj);
9159 gc_remember_unprotected(objspace, obj);
9160
9161#if RGENGC_PROFILE
9162 objspace->profile.total_shade_operation_count++;
9163#if RGENGC_PROFILE >= 2
9164 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9165#endif /* RGENGC_PROFILE >= 2 */
9166#endif /* RGENGC_PROFILE */
9167 }
9168 else {
9169 RVALUE_AGE_RESET(obj);
9170 }
9171
9172 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9173 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9174 }
9175 RB_VM_LOCK_LEAVE_NO_BARRIER();
9176 }
9177}
9178
9179/*
9180 * remember `obj' if needed.
9181 */
9182void
9183rb_gc_writebarrier_remember(VALUE obj)
9184{
9185 rb_objspace_t *objspace = &rb_objspace;
9186
9187 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9188
9189 if (is_incremental_marking(objspace)) {
9190 if (RVALUE_BLACK_P(obj)) {
9191 gc_grey(objspace, obj);
9192 }
9193 }
9194 else {
9195 if (RVALUE_OLD_P(obj)) {
9196 rgengc_remember(objspace, obj);
9197 }
9198 }
9199}
9200
9201void
9202rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9203{
9204 rb_objspace_t *objspace = &rb_objspace;
9205
9206 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9207 if (!RVALUE_OLD_P(dest)) {
9208 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9209 RVALUE_AGE_RESET(dest);
9210 }
9211 else {
9212 RVALUE_DEMOTE(objspace, dest);
9213 }
9214 }
9215
9216 check_rvalue_consistency(dest);
9217}
9218
9219/* RGENGC analysis information */
9220
9221VALUE
9222rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9223{
9224 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9225}
9226
9227VALUE
9228rb_obj_rgengc_promoted_p(VALUE obj)
9229{
9230 return RBOOL(OBJ_PROMOTED(obj));
9231}
9232
9233size_t
9234rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9235{
9236 size_t n = 0;
9237 static ID ID_marked;
9238 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9239
9240 if (!ID_marked) {
9241#define I(s) ID_##s = rb_intern(#s);
9242 I(marked);
9243 I(wb_protected);
9244 I(old);
9245 I(marking);
9246 I(uncollectible);
9247 I(pinned);
9248#undef I
9249 }
9250
9251 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9252 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9253 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9254 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9255 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9256 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9257 return n;
9258}
9259
9260/* GC */
9261
9262void
9263rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
9264{
9265 newobj_cache->incremental_mark_step_allocated_slots = 0;
9266
9267 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9268 rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];
9269
9270 struct heap_page *page = cache->using_page;
9271 RVALUE *freelist = cache->freelist;
9272 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9273
9274 heap_page_freelist_append(page, freelist);
9275
9276 cache->using_page = NULL;
9277 cache->freelist = NULL;
9278 }
9279}
9280
9281void
9282rb_gc_force_recycle(VALUE obj)
9283{
9284 /* no-op */
9285}
9286
9287#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9288#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9289#endif
9290
9291void
9292rb_gc_register_mark_object(VALUE obj)
9293{
9294 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9295 return;
9296
9297 RB_VM_LOCK_ENTER();
9298 {
9299 VALUE ary_ary = GET_VM()->mark_object_ary;
9300 VALUE ary = rb_ary_last(0, 0, ary_ary);
9301
9302 if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
9303 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9304 rb_ary_push(ary_ary, ary);
9305 }
9306
9307 rb_ary_push(ary, obj);
9308 }
9309 RB_VM_LOCK_LEAVE();
9310}
9311
9312void
9313rb_gc_register_address(VALUE *addr)
9314{
9315 rb_objspace_t *objspace = &rb_objspace;
9316 struct gc_list *tmp;
9317
9318 VALUE obj = *addr;
9319
9320 tmp = ALLOC(struct gc_list);
9321 tmp->next = global_list;
9322 tmp->varptr = addr;
9323 global_list = tmp;
9324
9325 /*
9326 * Because some C extensions have assignment-then-register bugs,
9327     * we defensively guard `obj` here so that it does not get swept.
9328 */
9329 RB_GC_GUARD(obj);
9330 if (0 && !SPECIAL_CONST_P(obj)) {
9331 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
9332 rb_obj_class(obj));
9333 rb_print_backtrace(stderr);
9334 }
9335}
9336
9337void
9338rb_gc_unregister_address(VALUE *addr)
9339{
9340 rb_objspace_t *objspace = &rb_objspace;
9341 struct gc_list *tmp = global_list;
9342
9343 if (tmp->varptr == addr) {
9344 global_list = tmp->next;
9345 xfree(tmp);
9346 return;
9347 }
9348 while (tmp->next) {
9349 if (tmp->next->varptr == addr) {
9350 struct gc_list *t = tmp->next;
9351
9352 tmp->next = tmp->next->next;
9353 xfree(t);
9354 break;
9355 }
9356 tmp = tmp->next;
9357 }
9358}
9359
9360void
9361rb_global_variable(VALUE *var)
9362{
9363 rb_gc_register_address(var);
9364}
9365
9366#define GC_NOTIFY 0
9367
9368enum {
9369 gc_stress_no_major,
9370 gc_stress_no_immediate_sweep,
9371 gc_stress_full_mark_after_malloc,
9372 gc_stress_max
9373};
9374
9375#define gc_stress_full_mark_after_malloc_p() \
9376 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9377
9378static void
9379heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
9380{
9381 if (!heap->free_pages) {
9382 if (!heap_increment(objspace, size_pool, heap)) {
9383 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9384 heap_increment(objspace, size_pool, heap);
9385 }
9386 }
9387}
9388
9389static int
9390ready_to_gc(rb_objspace_t *objspace)
9391{
9392 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9393 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9394 rb_size_pool_t *size_pool = &size_pools[i];
9395 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9396 }
9397 return FALSE;
9398 }
9399 else {
9400 return TRUE;
9401 }
9402}
9403
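/*
 * Recompute malloc_limit after a GC: grow it (capped at malloc_limit_max)
 * when the bytes allocated since the previous GC exceeded the old limit,
 * otherwise decay it by 2% (bounded below by malloc_limit_min). With
 * RGENGC_ESTIMATE_OLDMALLOC, the oldmalloc counters are updated as well and
 * may request the next major GC via GPR_FLAG_MAJOR_BY_OLDMALLOC.
 */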
9404static void
9405gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9406{
9407 gc_prof_set_malloc_info(objspace);
9408 {
9409 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9410 size_t old_limit = malloc_limit;
9411
9412 if (inc > malloc_limit) {
9413 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9414 if (malloc_limit > gc_params.malloc_limit_max) {
9415 malloc_limit = gc_params.malloc_limit_max;
9416 }
9417 }
9418 else {
9419 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
9420 if (malloc_limit < gc_params.malloc_limit_min) {
9421 malloc_limit = gc_params.malloc_limit_min;
9422 }
9423 }
9424
9425 if (0) {
9426 if (old_limit != malloc_limit) {
9427 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9428 rb_gc_count(), old_limit, malloc_limit);
9429 }
9430 else {
9431 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9432 rb_gc_count(), malloc_limit);
9433 }
9434 }
9435 }
9436
9437 /* reset oldmalloc info */
9438#if RGENGC_ESTIMATE_OLDMALLOC
9439 if (!full_mark) {
9440 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9441 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9442 objspace->rgengc.oldmalloc_increase_limit =
9443 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9444
9445 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9446 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9447 }
9448 }
9449
9450 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9451 rb_gc_count(),
9452 objspace->rgengc.need_major_gc,
9453 objspace->rgengc.oldmalloc_increase,
9454 objspace->rgengc.oldmalloc_increase_limit,
9455 gc_params.oldmalloc_limit_max);
9456 }
9457 else {
9458 /* major GC */
9459 objspace->rgengc.oldmalloc_increase = 0;
9460
9461 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9462 objspace->rgengc.oldmalloc_increase_limit =
9463 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9464 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9465 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9466 }
9467 }
9468 }
9469#endif
9470}
9471
9472static int
9473garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9474{
9475 int ret;
9476
9477 RB_VM_LOCK_ENTER();
9478 {
9479#if GC_PROFILE_MORE_DETAIL
9480 objspace->profile.prepare_time = getrusage_time();
9481#endif
9482
9483 gc_rest(objspace);
9484
9485#if GC_PROFILE_MORE_DETAIL
9486 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9487#endif
9488
9489 ret = gc_start(objspace, reason);
9490 }
9491 RB_VM_LOCK_LEAVE();
9492
9493 return ret;
9494}
9495
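/*
 * Main GC entry point. Decides whether this cycle is a major or minor GC,
 * whether marking is incremental and whether sweeping is immediate or
 * lazy, honours GC.stress flags, records profiling information, and then
 * runs marking followed (once marking has finished) by sweeping.
 */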
9496static int
9497gc_start(rb_objspace_t *objspace, unsigned int reason)
9498{
9499 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9500
9501 /* reason may be clobbered, later, so keep set immediate_sweep here */
9502 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9503
9504 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
9505 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
9506
9507 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9508 GC_ASSERT(!is_lazy_sweeping(objspace));
9509 GC_ASSERT(!is_incremental_marking(objspace));
9510
9511 unsigned int lock_lev;
9512 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9513
9514#if RGENGC_CHECK_MODE >= 2
9515 gc_verify_internal_consistency(objspace);
9516#endif
9517
9518 if (ruby_gc_stressful) {
9519 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9520
9521 if ((flag & (1<<gc_stress_no_major)) == 0) {
9522 do_full_mark = TRUE;
9523 }
9524
9525 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9526 }
9527
9528 if (objspace->rgengc.need_major_gc) {
9529 reason |= objspace->rgengc.need_major_gc;
9530 do_full_mark = TRUE;
9531 }
9532 else if (RGENGC_FORCE_MAJOR_GC) {
9533 reason = GPR_FLAG_MAJOR_BY_FORCE;
9534 do_full_mark = TRUE;
9535 }
9536
9537 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9538
9539 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9540 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
9541 }
9542
9543 if (objspace->flags.dont_incremental ||
9544 reason & GPR_FLAG_IMMEDIATE_MARK ||
9545 ruby_gc_stressful) {
9546 objspace->flags.during_incremental_marking = FALSE;
9547 }
9548 else {
9549 objspace->flags.during_incremental_marking = do_full_mark;
9550 }
9551
9552 /* Explicitly enable compaction (GC.compact) */
9553 if (do_full_mark && ruby_enable_autocompact) {
9554 objspace->flags.during_compacting = TRUE;
9555#if RGENGC_CHECK_MODE
9556 objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
9557#endif
9558 }
9559 else {
9560 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9561 }
9562
9563 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9564 objspace->flags.immediate_sweep = TRUE;
9565 }
9566
9567 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9568
9569 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9570 reason,
9571 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9572
9573#if USE_DEBUG_COUNTER
9574 RB_DEBUG_COUNTER_INC(gc_count);
9575
9576 if (reason & GPR_FLAG_MAJOR_MASK) {
9577 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9578 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9579 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9580 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9581#if RGENGC_ESTIMATE_OLDMALLOC
9582 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9583#endif
9584 }
9585 else {
9586 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9587 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9588 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9589 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9590 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9591 }
9592#endif
9593
9594 objspace->profile.count++;
9595 objspace->profile.latest_gc_info = reason;
9596 objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
9597 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9598 objspace->profile.weak_references_count = 0;
9599 objspace->profile.retained_weak_references_count = 0;
9600 gc_prof_setup_new_record(objspace, reason);
9601 gc_reset_malloc_info(objspace, do_full_mark);
9602
9603 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
9604 GC_ASSERT(during_gc);
9605
9606 gc_prof_timer_start(objspace);
9607 {
9608 if (gc_marks(objspace, do_full_mark)) {
9609 gc_sweep(objspace);
9610 }
9611 }
9612 gc_prof_timer_stop(objspace);
9613
9614 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9615 return TRUE;
9616}
9617
9618static void
9619gc_rest(rb_objspace_t *objspace)
9620{
9621 int marking = is_incremental_marking(objspace);
9622 int sweeping = is_lazy_sweeping(objspace);
9623
9624 if (marking || sweeping) {
9625 unsigned int lock_lev;
9626 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9627
9628 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9629
9630 if (is_incremental_marking(objspace)) {
9631 gc_marking_enter(objspace);
9632 gc_marks_rest(objspace);
9633 gc_marking_exit(objspace);
9634
9635 gc_sweep(objspace);
9636 }
9637
9638 if (is_lazy_sweeping(objspace)) {
9639 gc_sweeping_enter(objspace);
9640 gc_sweep_rest(objspace);
9641 gc_sweeping_exit(objspace);
9642 }
9643
9644 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9645 }
9646}
9647
9648struct objspace_and_reason {
9649    rb_objspace_t *objspace;
9650 unsigned int reason;
9651};
9652
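/*
 * gc_current_status_fill() encodes the collector's current phase as a short
 * status code used in the log output below: 'M' while marking, with 'F'
 * appended for a full mark and 'I' for incremental marking; 'S' while
 * sweeping, with 'L' for lazy sweeping; and 'N' when no phase is active.
 * For example "MFI" means an incremental full mark is in progress and "SL"
 * means a lazy sweep is in progress.
 */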
9653static void
9654gc_current_status_fill(rb_objspace_t *objspace, char *buff)
9655{
9656 int i = 0;
9657 if (is_marking(objspace)) {
9658 buff[i++] = 'M';
9659 if (is_full_marking(objspace)) buff[i++] = 'F';
9660 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9661 }
9662 else if (is_sweeping(objspace)) {
9663 buff[i++] = 'S';
9664 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9665 }
9666 else {
9667 buff[i++] = 'N';
9668 }
9669 buff[i] = '\0';
9670}
9671
9672static const char *
9673gc_current_status(rb_objspace_t *objspace)
9674{
9675 static char buff[0x10];
9676 gc_current_status_fill(objspace, buff);
9677 return buff;
9678}
9679
9680#if PRINT_ENTER_EXIT_TICK
9681
9682static tick_t last_exit_tick;
9683static tick_t enter_tick;
9684static int enter_count = 0;
9685static char last_gc_status[0x10];
9686
9687static inline void
9688gc_record(rb_objspace_t *objspace, int direction, const char *event)
9689{
9690 if (direction == 0) { /* enter */
9691 enter_count++;
9692 enter_tick = tick();
9693 gc_current_status_fill(objspace, last_gc_status);
9694 }
9695 else { /* exit */
9696 tick_t exit_tick = tick();
9697 char current_gc_status[0x10];
9698 gc_current_status_fill(objspace, current_gc_status);
9699#if 1
9700 /* [last mutator time] [gc time] [event] */
9701 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9702 enter_tick - last_exit_tick,
9703 exit_tick - enter_tick,
9704 event,
9705 last_gc_status, current_gc_status,
9706 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9707 last_exit_tick = exit_tick;
9708#else
9709 /* [enter_tick] [gc time] [event] */
9710 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9711 enter_tick,
9712 exit_tick - enter_tick,
9713 event,
9714 last_gc_status, current_gc_status,
9715 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9716#endif
9717 }
9718}
9719#else /* PRINT_ENTER_EXIT_TICK */
9720static inline void
9721gc_record(rb_objspace_t *objspace, int direction, const char *event)
9722{
9723 /* null */
9724}
9725#endif /* PRINT_ENTER_EXIT_TICK */
9726
9727static const char *
9728gc_enter_event_cstr(enum gc_enter_event event)
9729{
9730 switch (event) {
9731 case gc_enter_event_start: return "start";
9732 case gc_enter_event_continue: return "continue";
9733 case gc_enter_event_rest: return "rest";
9734 case gc_enter_event_finalizer: return "finalizer";
9735 case gc_enter_event_rb_memerror: return "rb_memerror";
9736 }
9737 return NULL;
9738}
9739
9740static void
9741gc_enter_count(enum gc_enter_event event)
9742{
9743 switch (event) {
9744 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9745 case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
9746 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9747 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9748 case gc_enter_event_rb_memerror: /* nothing */ break;
9749 }
9750}
9751
9752#ifndef MEASURE_GC
9753#define MEASURE_GC (objspace->flags.measure_gc)
9754#endif
9755
9756static bool current_process_time(struct timespec *ts);
9757
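/*
 * gc_clock_start() and gc_clock_end() bracket a measured GC phase using the
 * process CPU time reported by current_process_time(). gc_clock_start()
 * zeroes the timestamp when no clock is available; gc_clock_end() then
 * returns the elapsed time in nanoseconds, or 0 when there was no valid
 * start time or the clock went backwards across seconds. The nanosecond
 * difference may be negative, which is compensated by the seconds term
 * whenever at least one full second has elapsed.
 */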
9758static void
9759gc_clock_start(struct timespec *ts)
9760{
9761 if (!current_process_time(ts)) {
9762 ts->tv_sec = 0;
9763 ts->tv_nsec = 0;
9764 }
9765}
9766
9767static uint64_t
9768gc_clock_end(struct timespec *ts)
9769{
9770 struct timespec end_time;
9771
9772 if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
9773 current_process_time(&end_time) &&
9774 end_time.tv_sec >= ts->tv_sec) {
9775 return (uint64_t)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
9776 (end_time.tv_nsec - ts->tv_nsec);
9777 }
9778
9779 return 0;
9780}
9781
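/*
 * gc_enter()/gc_exit() bracket every GC entry point. gc_enter() takes the VM
 * lock at the given level, issues a VM barrier to stop other ractors for
 * events that will mutate the heap (start, continue, and rest while
 * marking), flips during_gc on, and fires the RUBY_INTERNAL_EVENT_GC_ENTER
 * hook; gc_exit() does the reverse and releases the lock.
 */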
9782static inline void
9783gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9784{
9785 RB_VM_LOCK_ENTER_LEV(lock_lev);
9786
9787 switch (event) {
9788 case gc_enter_event_rest:
9789 if (!is_marking(objspace)) break;
9790 // fall through
9791 case gc_enter_event_start:
9792 case gc_enter_event_continue:
9793 // stop other ractors
9794 rb_vm_barrier();
9795 break;
9796 default:
9797 break;
9798 }
9799
9800 gc_enter_count(event);
9801 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9802 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9803
9804 during_gc = TRUE;
9805 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9806 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9807 gc_record(objspace, 0, gc_enter_event_cstr(event));
9808 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
9809}
9810
9811static inline void
9812gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9813{
9814 GC_ASSERT(during_gc != 0);
9815
9816 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
9817 gc_record(objspace, 1, gc_enter_event_cstr(event));
9818 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9819 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9820 during_gc = FALSE;
9821
9822 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9823}
9824
9825static void
9826gc_marking_enter(rb_objspace_t *objspace)
9827{
9828 GC_ASSERT(during_gc != 0);
9829
9830 gc_clock_start(&objspace->profile.marking_start_time);
9831}
9832
9833static void
9834gc_marking_exit(rb_objspace_t *objspace)
9835{
9836 GC_ASSERT(during_gc != 0);
9837
9838 objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
9839}
9840
9841static void
9842gc_sweeping_enter(rb_objspace_t *objspace)
9843{
9844 GC_ASSERT(during_gc != 0);
9845
9846 gc_clock_start(&objspace->profile.sweeping_start_time);
9847}
9848
9849static void
9850gc_sweeping_exit(rb_objspace_t *objspace)
9851{
9852 GC_ASSERT(during_gc != 0);
9853
9854 objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
9855}
9856
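/*
 * garbage_collect_with_gvl() is used on allocation paths that may run
 * without the GVL. If the current thread already holds the GVL it collects
 * directly; if it is a registered native thread it re-acquires the GVL via
 * rb_thread_call_with_gvl() and runs gc_with_gvl(); a thread unknown to the
 * VM cannot run the collector at all, so the process gives up with a fatal
 * out-of-memory message.
 */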
9857static void *
9858gc_with_gvl(void *ptr)
9859{
9860 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
9861 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9862}
9863
9864static int
9865garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9866{
9867 if (dont_gc_val()) return TRUE;
9868 if (ruby_thread_has_gvl_p()) {
9869 return garbage_collect(objspace, reason);
9870 }
9871 else {
9872 if (ruby_native_thread_p()) {
9873 struct objspace_and_reason oar;
9874 oar.objspace = objspace;
9875 oar.reason = reason;
9876 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
9877 }
9878 else {
9879 /* no ruby thread */
9880 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9881 exit(EXIT_FAILURE);
9882 }
9883 }
9884}
9885
9886static int
9887gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
9888{
9889 rb_objspace_t *objspace = &rb_objspace;
9890 VALUE v = (VALUE)vstart;
9891 for (; v != (VALUE)vend; v += stride) {
9892 switch (BUILTIN_TYPE(v)) {
9893 case T_NONE:
9894 case T_ZOMBIE:
9895 break;
9896 case T_STRING:
9897            // precompute the string coderange. This both saves time for when it will
9898            // eventually be needed, and avoids mutating heap pages after a potential fork.
9899            rb_enc_str_coderange(v);
9900            // fall through
9901 default:
9902 if (!RVALUE_OLD_P(v) && !RVALUE_WB_UNPROTECTED(v)) {
9903 RVALUE_AGE_SET_CANDIDATE(objspace, v);
9904 }
9905 }
9906 }
9907
9908 return 0;
9909}
9910
9911static VALUE
9912gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9913{
9914 rb_objspace_t *objspace = &rb_objspace;
9915 unsigned int reason = (GPR_FLAG_FULL_MARK |
9916 GPR_FLAG_IMMEDIATE_MARK |
9917 GPR_FLAG_IMMEDIATE_SWEEP |
9918 GPR_FLAG_METHOD);
9919
9920 /* For now, compact implies full mark / sweep, so ignore other flags */
9921 if (RTEST(compact)) {
9922 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9923
9924 reason |= GPR_FLAG_COMPACT;
9925 }
9926 else {
9927 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9928 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9929 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9930 }
9931
9932 garbage_collect(objspace, reason);
9933 gc_finalize_deferred(objspace);
9934
9935 return Qnil;
9936}
9937
9938static void
9939free_empty_pages(void)
9940{
9941 rb_objspace_t *objspace = &rb_objspace;
9942
9943 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9944 /* Move all empty pages to the tomb heap for freeing. */
9945 rb_size_pool_t *size_pool = &size_pools[i];
9946 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
9947 rb_heap_t *tomb_heap = SIZE_POOL_TOMB_HEAP(size_pool);
9948
9949 size_t freed_pages = 0;
9950
9951 struct heap_page **next_page_ptr = &heap->free_pages;
9952 struct heap_page *page = heap->free_pages;
9953 while (page) {
9954            /* All finalizers should have been run in gc_start_internal, so there
9955 * should be no objects that require finalization. */
9956 GC_ASSERT(page->final_slots == 0);
9957
9958 struct heap_page *next_page = page->free_next;
9959
9960 if (page->free_slots == page->total_slots) {
9961 heap_unlink_page(objspace, heap, page);
9962 heap_add_page(objspace, size_pool, tomb_heap, page);
9963 freed_pages++;
9964 }
9965 else {
9966 *next_page_ptr = page;
9967 next_page_ptr = &page->free_next;
9968 }
9969
9970 page = next_page;
9971 }
9972
9973 *next_page_ptr = NULL;
9974
9975 size_pool_allocatable_pages_set(objspace, size_pool, size_pool->allocatable_pages + freed_pages);
9976 }
9977
9978 heap_pages_free_unused_pages(objspace);
9979}
9980
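/*
 * rb_gc_prepare_heap() readies the heap before an application forks worker
 * processes (it is the GC part of Process.warmup): it flags surviving young
 * objects as promotion candidates via gc_set_candidate_object_i, runs a full
 * compacting GC, releases completely empty pages through free_empty_pages(),
 * and finally asks the allocator to return unused memory with malloc_trim()
 * where available.
 */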
9981void
9982rb_gc_prepare_heap(void)
9983{
9984 rb_objspace_each_objects(gc_set_candidate_object_i, NULL);
9985 gc_start_internal(NULL, Qtrue, Qtrue, Qtrue, Qtrue, Qtrue);
9986 free_empty_pages();
9987
9988#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
9989 malloc_trim(0);
9990#endif
9991}
9992
9993static int
9994gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
9995{
9996 GC_ASSERT(!SPECIAL_CONST_P(obj));
9997
9998 switch (BUILTIN_TYPE(obj)) {
9999 case T_NONE:
10000 case T_NIL:
10001 case T_MOVED:
10002 case T_ZOMBIE:
10003 return FALSE;
10004 case T_SYMBOL:
10005 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
10006 return FALSE;
10007 }
10008 /* fall through */
10009 case T_STRING:
10010 case T_OBJECT:
10011 case T_FLOAT:
10012 case T_IMEMO:
10013 case T_ARRAY:
10014 case T_BIGNUM:
10015 case T_ICLASS:
10016 case T_MODULE:
10017 case T_REGEXP:
10018 case T_DATA:
10019 case T_MATCH:
10020 case T_STRUCT:
10021 case T_HASH:
10022 case T_FILE:
10023 case T_COMPLEX:
10024 case T_RATIONAL:
10025 case T_NODE:
10026 case T_CLASS:
10027 if (FL_TEST(obj, FL_FINALIZE)) {
10028 /* The finalizer table is a numtable. It looks up objects by address.
10029 * We can't mark the keys in the finalizer table because that would
10030 * prevent the objects from being collected. This check prevents
10031 * objects that are keys in the finalizer table from being moved
10032 * without directly pinning them. */
10033 GC_ASSERT(st_is_member(finalizer_table, obj));
10034
10035 return FALSE;
10036 }
10037 GC_ASSERT(RVALUE_MARKED(obj));
10038 GC_ASSERT(!RVALUE_PINNED(obj));
10039
10040 return TRUE;
10041
10042 default:
10043 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
10044 break;
10045 }
10046
10047 return FALSE;
10048}
10049
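/*
 * gc_move() relocates one live object during compaction. It saves the source
 * slot's mark, WB-unprotected, uncollectible and remembered bits plus its
 * age, copies the object body (and any RVALUE_OVERHEAD area) into the free
 * destination slot, re-applies those bits at the new address, and finally
 * turns the source slot into a T_MOVED forwarding stub whose destination
 * field records the new location. rb_gc_location() resolves these stubs when
 * the reference-updating pass later rewrites pointers.
 */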
10050static VALUE
10051gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size)
10052{
10053 int marked;
10054 int wb_unprotected;
10055 int uncollectible;
10056 int age;
10057 RVALUE *dest = (RVALUE *)free;
10058 RVALUE *src = (RVALUE *)scan;
10059
10060 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
10061
10062 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
10063 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
10064
10065 GC_ASSERT(!RVALUE_MARKING((VALUE)src));
10066
10067 /* Save off bits for current object. */
10068 marked = rb_objspace_marked_object_p((VALUE)src);
10069 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
10070 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
10071 bool remembered = RVALUE_REMEMBERED((VALUE)src);
10072 age = RVALUE_AGE_GET((VALUE)src);
10073
10074 /* Clear bits for eventual T_MOVED */
10075 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
10076 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
10077 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
10078 CLEAR_IN_BITMAP(GET_HEAP_PAGE((VALUE)src)->remembered_bits, (VALUE)src);
10079
10080 if (FL_TEST((VALUE)src, FL_EXIVAR)) {
10081 /* Resizing the st table could cause a malloc */
10082 DURING_GC_COULD_MALLOC_REGION_START();
10083 {
10084 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
10085 }
10086 DURING_GC_COULD_MALLOC_REGION_END();
10087 }
10088
10089 st_data_t srcid = (st_data_t)src, id;
10090
10091 /* If the source object's object_id has been seen, we need to update
10092     * the object-to-object-id mapping. */
10093 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
10094 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
10095 /* Resizing the st table could cause a malloc */
10096 DURING_GC_COULD_MALLOC_REGION_START();
10097 {
10098 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
10099 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
10100 }
10101 DURING_GC_COULD_MALLOC_REGION_END();
10102 }
10103
10104 /* Move the object */
10105 memcpy(dest, src, MIN(src_slot_size, slot_size));
10106
10107 if (RVALUE_OVERHEAD > 0) {
10108 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
10109 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
10110
10111 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
10112 }
10113
10114 memset(src, 0, src_slot_size);
10115 RVALUE_AGE_RESET((VALUE)src);
10116
10117 /* Set bits for object in new location */
10118 if (remembered) {
10119 MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
10120 }
10121 else {
10122 CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
10123 }
10124
10125 if (marked) {
10126 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10127 }
10128 else {
10129 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10130 }
10131
10132 if (wb_unprotected) {
10133 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10134 }
10135 else {
10136 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10137 }
10138
10139 if (uncollectible) {
10140 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10141 }
10142 else {
10143 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10144 }
10145
10146 RVALUE_AGE_SET((VALUE)dest, age);
10147 /* Assign forwarding address */
10148 src->as.moved.flags = T_MOVED;
10149 src->as.moved.dummy = Qundef;
10150 src->as.moved.destination = (VALUE)dest;
10151 GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
10152
10153 return (VALUE)src;
10154}
10155
10156#if GC_CAN_COMPILE_COMPACTION
10157static int
10158compare_pinned_slots(const void *left, const void *right, void *dummy)
10159{
10160 struct heap_page *left_page;
10161 struct heap_page *right_page;
10162
10163 left_page = *(struct heap_page * const *)left;
10164 right_page = *(struct heap_page * const *)right;
10165
10166 return left_page->pinned_slots - right_page->pinned_slots;
10167}
10168
10169static int
10170compare_free_slots(const void *left, const void *right, void *dummy)
10171{
10172 struct heap_page *left_page;
10173 struct heap_page *right_page;
10174
10175 left_page = *(struct heap_page * const *)left;
10176 right_page = *(struct heap_page * const *)right;
10177
10178 return left_page->free_slots - right_page->free_slots;
10179}
10180
10181static void
10182gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
10183{
10184 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10185 rb_size_pool_t *size_pool = &size_pools[j];
10186
10187 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10188 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
10189 struct heap_page *page = 0, **page_list = malloc(size);
10190 size_t i = 0;
10191
10192 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10193 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10194 page_list[i++] = page;
10195 GC_ASSERT(page);
10196 }
10197
10198 GC_ASSERT((size_t)i == total_pages);
10199
10200 /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
10201 * head of the list, so empty pages will end up at the start of the heap */
10202 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);
10203
10204 /* Reset the eden heap */
10205 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10206
10207 for (i = 0; i < total_pages; i++) {
10208 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10209 if (page_list[i]->free_slots != 0) {
10210 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
10211 }
10212 }
10213
10214 free(page_list);
10215 }
10216}
10217#endif
10218
10219static void
10220gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
10221{
10222 if (ARY_SHARED_P(v)) {
10223 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10224
10225 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10226
10227 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10228 // If the root is embedded and its location has changed
10229 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10230 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10231 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10232 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10233 }
10234 }
10235 else {
10236 long len = RARRAY_LEN(v);
10237
10238 if (len > 0) {
10239 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
10240 for (long i = 0; i < len; i++) {
10241 UPDATE_IF_MOVED(objspace, ptr[i]);
10242 }
10243 }
10244
10245 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10246 if (rb_ary_embeddable_p(v)) {
10247 rb_ary_make_embedded(v);
10248 }
10249 }
10250 }
10251}
10252
10253static void gc_ref_update_table_values_only(rb_objspace_t *objspace, st_table *tbl);
10254
10255static void
10256gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
10257{
10258 VALUE *ptr = ROBJECT_IVPTR(v);
10259
10260 if (rb_shape_obj_too_complex(v)) {
10261 gc_ref_update_table_values_only(objspace, ROBJECT_IV_HASH(v));
10262 return;
10263 }
10264
10265 size_t slot_size = rb_gc_obj_slot_size(v);
10266 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
10267 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10268 // Object can be re-embedded
10269 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10270 RB_FL_SET_RAW(v, ROBJECT_EMBED);
10271 xfree(ptr);
10272 ptr = ROBJECT(v)->as.ary;
10273 }
10274
10275 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10276 UPDATE_IF_MOVED(objspace, ptr[i]);
10277 }
10278}
10279
10280static int
10281hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10282{
10283 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10284
10285 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10286 *key = rb_gc_location((VALUE)*key);
10287 }
10288
10289 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10290 *value = rb_gc_location((VALUE)*value);
10291 }
10292
10293 return ST_CONTINUE;
10294}
10295
10296static int
10297hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10298{
10299 rb_objspace_t *objspace;
10300
10301 objspace = (rb_objspace_t *)argp;
10302
10303 if (gc_object_moved_p(objspace, (VALUE)key)) {
10304 return ST_REPLACE;
10305 }
10306
10307 if (gc_object_moved_p(objspace, (VALUE)value)) {
10308 return ST_REPLACE;
10309 }
10310 return ST_CONTINUE;
10311}
10312
10313static int
10314hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10315{
10316 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10317
10318 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10319 *value = rb_gc_location((VALUE)*value);
10320 }
10321
10322 return ST_CONTINUE;
10323}
10324
10325static int
10326hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10327{
10328 rb_objspace_t *objspace;
10329
10330 objspace = (rb_objspace_t *)argp;
10331
10332 if (gc_object_moved_p(objspace, (VALUE)value)) {
10333 return ST_REPLACE;
10334 }
10335 return ST_CONTINUE;
10336}
10337
10338static void
10339gc_ref_update_table_values_only(rb_objspace_t *objspace, st_table *tbl)
10340{
10341 if (!tbl || tbl->num_entries == 0) return;
10342
10343 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10344 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10345 }
10346}
10347
10348void
10349rb_gc_ref_update_table_values_only(st_table *tbl)
10350{
10351 gc_ref_update_table_values_only(&rb_objspace, tbl);
10352}
10353
10354static void
10355gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
10356{
10357 if (!tbl || tbl->num_entries == 0) return;
10358
10359 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10360 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10361 }
10362}
10363
10364/* Update MOVED references in a VALUE=>VALUE st_table */
10365void
10366rb_gc_update_tbl_refs(st_table *ptr)
10367{
10368 rb_objspace_t *objspace = &rb_objspace;
10369 gc_update_table_refs(objspace, ptr);
10370}
10371
10372static void
10373gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
10374{
10375 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10376}
10377
10378static void
10379gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
10380{
10381 rb_method_definition_t *def = me->def;
10382
10383 UPDATE_IF_MOVED(objspace, me->owner);
10384 UPDATE_IF_MOVED(objspace, me->defined_class);
10385
10386 if (def) {
10387 switch (def->type) {
10388 case VM_METHOD_TYPE_ISEQ:
10389 if (def->body.iseq.iseqptr) {
10390 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
10391 }
10392 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10393 break;
10394 case VM_METHOD_TYPE_ATTRSET:
10395 case VM_METHOD_TYPE_IVAR:
10396 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10397 break;
10398 case VM_METHOD_TYPE_BMETHOD:
10399 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10400 break;
10401 case VM_METHOD_TYPE_ALIAS:
10402 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
10403 return;
10404 case VM_METHOD_TYPE_REFINED:
10405 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
10406 break;
10407 case VM_METHOD_TYPE_CFUNC:
10408 case VM_METHOD_TYPE_ZSUPER:
10409 case VM_METHOD_TYPE_MISSING:
10410 case VM_METHOD_TYPE_OPTIMIZED:
10411 case VM_METHOD_TYPE_UNDEF:
10412 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10413 break;
10414 }
10415 }
10416}
10417
10418static void
10419gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
10420{
10421 long i;
10422
10423 for (i=0; i<n; i++) {
10424 UPDATE_IF_MOVED(objspace, values[i]);
10425 }
10426}
10427
10428void
10429rb_gc_update_values(long n, VALUE *values)
10430{
10431 gc_update_values(&rb_objspace, n, values);
10432}
10433
10434static bool
10435moved_or_living_object_strictly_p(rb_objspace_t *objspace, VALUE obj)
10436{
10437 return obj &&
10438 is_pointer_to_heap(objspace, (void *)obj) &&
10439 (is_live_object(objspace, obj) || BUILTIN_TYPE(obj) == T_MOVED);
10440}
10441
10442static void
10443gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
10444{
10445 switch (imemo_type(obj)) {
10446 case imemo_env:
10447 {
10448 rb_env_t *env = (rb_env_t *)obj;
10449 if (LIKELY(env->ep)) {
10450                // env->ep can be NULL just after newobj(), hence the check above.
10451 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10452 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10453 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10454 }
10455 }
10456 break;
10457 case imemo_cref:
10458 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10459 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10460 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10461 break;
10462 case imemo_svar:
10463 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10464 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10465 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10466 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10467 break;
10468 case imemo_throw_data:
10469 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10470 break;
10471 case imemo_ifunc:
10472 break;
10473 case imemo_memo:
10474 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10475 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10476 break;
10477 case imemo_ment:
10478 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10479 break;
10480 case imemo_iseq:
10481 rb_iseq_mark_and_move((rb_iseq_t *)obj, true);
10482 break;
10483 case imemo_ast:
10484 rb_ast_update_references((rb_ast_t *)obj);
10485 break;
10486 case imemo_callcache:
10487 {
10488 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
10489
10490 if (!cc->klass) {
10491 // already invalidated
10492 }
10493 else {
10494 if (moved_or_living_object_strictly_p(objspace, cc->klass) &&
10495 moved_or_living_object_strictly_p(objspace, (VALUE)cc->cme_)) {
10496 UPDATE_IF_MOVED(objspace, cc->klass);
10497 TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
10498 }
10499 else {
10500 vm_cc_invalidate(cc);
10501 }
10502 }
10503 }
10504 break;
10505 case imemo_constcache:
10506 {
10507            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
10508            UPDATE_IF_MOVED(objspace, ice->value);
10509 }
10510 break;
10511 case imemo_parser_strterm:
10512 case imemo_tmpbuf:
10513 case imemo_callinfo:
10514 break;
10515 default:
10516 rb_bug("not reachable %d", imemo_type(obj));
10517 break;
10518 }
10519}
10520
10521static enum rb_id_table_iterator_result
10522check_id_table_move(VALUE value, void *data)
10523{
10524 rb_objspace_t *objspace = (rb_objspace_t *)data;
10525
10526 if (gc_object_moved_p(objspace, (VALUE)value)) {
10527 return ID_TABLE_REPLACE;
10528 }
10529
10530 return ID_TABLE_CONTINUE;
10531}
10532
10533/* Returns the new location of an object, if it moved. Otherwise returns
10534 * the existing location. */
10535VALUE
10536rb_gc_location(VALUE value)
10537{
10538
10539 VALUE destination;
10540
10541 if (!SPECIAL_CONST_P(value)) {
10542 void *poisoned = asan_unpoison_object_temporary(value);
10543
10544 if (BUILTIN_TYPE(value) == T_MOVED) {
10545 destination = (VALUE)RMOVED(value)->destination;
10546 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
10547 }
10548 else {
10549 destination = value;
10550 }
10551
10552 /* Re-poison slot if it's not the one we want */
10553 if (poisoned) {
10554 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
10555 asan_poison_object(value);
10556 }
10557 }
10558 else {
10559 destination = value;
10560 }
10561
10562 return destination;
10563}
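
/*
 * Extensions whose TypedData objects participate in compaction typically
 * call rb_gc_location() from their dcompact callback to chase T_MOVED
 * forwarding addresses. A minimal sketch; `struct foo` and foo_compact()
 * are hypothetical and not part of this file:
 *
 *     struct foo { VALUE obj; };
 *
 *     static void
 *     foo_compact(void *ptr)
 *     {
 *         struct foo *f = ptr;
 *         f->obj = rb_gc_location(f->obj);  // fetch the post-move address
 *     }
 *
 * The matching dmark callback would mark f->obj with rb_gc_mark_movable()
 * so that the collector is allowed to move it in the first place.
 */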
10564
10565static enum rb_id_table_iterator_result
10566update_id_table(VALUE *value, void *data, int existing)
10567{
10568 rb_objspace_t *objspace = (rb_objspace_t *)data;
10569
10570 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10571 *value = rb_gc_location((VALUE)*value);
10572 }
10573
10574 return ID_TABLE_CONTINUE;
10575}
10576
10577static void
10578update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10579{
10580 if (tbl) {
10581 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10582 }
10583}
10584
10585static enum rb_id_table_iterator_result
10586update_cc_tbl_i(VALUE ccs_ptr, void *data)
10587{
10588 rb_objspace_t *objspace = (rb_objspace_t *)data;
10589 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
10590 VM_ASSERT(vm_ccs_p(ccs));
10591
10592 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10593 ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
10594 }
10595
10596 for (int i=0; i<ccs->len; i++) {
10597 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10598 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10599 }
10600 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10601 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10602 }
10603 }
10604
10605 // do not replace
10606 return ID_TABLE_CONTINUE;
10607}
10608
10609static void
10610update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
10611{
10612 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
10613 if (tbl) {
10614 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10615 }
10616}
10617
10618static enum rb_id_table_iterator_result
10619update_cvc_tbl_i(VALUE cvc_entry, void *data)
10620{
10621 struct rb_cvar_class_tbl_entry *entry;
10622 rb_objspace_t * objspace = (rb_objspace_t *)data;
10623
10624 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10625
10626 if (entry->cref) {
10627 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
10628 }
10629
10630 entry->class_value = rb_gc_location(entry->class_value);
10631
10632 return ID_TABLE_CONTINUE;
10633}
10634
10635static void
10636update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10637{
10638 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10639 if (tbl) {
10640 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10641 }
10642}
10643
10644static enum rb_id_table_iterator_result
10645mark_cvc_tbl_i(VALUE cvc_entry, void *data)
10646{
10647 rb_objspace_t *objspace = (rb_objspace_t *)data;
10648 struct rb_cvar_class_tbl_entry *entry;
10649
10650 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10651
10652 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
10653 gc_mark(objspace, (VALUE) entry->cref);
10654
10655 return ID_TABLE_CONTINUE;
10656}
10657
10658static void
10659mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10660{
10661 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10662 if (tbl) {
10663 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
10664 }
10665}
10666
10667static enum rb_id_table_iterator_result
10668update_const_table(VALUE value, void *data)
10669{
10670 rb_const_entry_t *ce = (rb_const_entry_t *)value;
10671 rb_objspace_t * objspace = (rb_objspace_t *)data;
10672
10673 if (gc_object_moved_p(objspace, ce->value)) {
10674 ce->value = rb_gc_location(ce->value);
10675 }
10676
10677 if (gc_object_moved_p(objspace, ce->file)) {
10678 ce->file = rb_gc_location(ce->file);
10679 }
10680
10681 return ID_TABLE_CONTINUE;
10682}
10683
10684static void
10685update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10686{
10687 if (!tbl) return;
10688 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10689}
10690
10691static void
10692update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
10693{
10694 while (entry) {
10695 UPDATE_IF_MOVED(objspace, entry->klass);
10696 entry = entry->next;
10697 }
10698}
10699
10700static void
10701update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
10702{
10703 UPDATE_IF_MOVED(objspace, ext->origin_);
10704 UPDATE_IF_MOVED(objspace, ext->includer);
10705 UPDATE_IF_MOVED(objspace, ext->refined_class);
10706 update_subclass_entries(objspace, ext->subclasses);
10707}
10708
10709static void
10710update_superclasses(rb_objspace_t *objspace, VALUE obj)
10711{
10712 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10713 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10714 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10715 }
10716 }
10717}
10718
10719static void
10720gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
10721{
10722 RVALUE *any = RANY(obj);
10723
10724 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10725
10726 if (FL_TEST(obj, FL_EXIVAR)) {
10727 rb_ref_update_generic_ivar(obj);
10728 }
10729
10730 switch (BUILTIN_TYPE(obj)) {
10731 case T_CLASS:
10732 if (FL_TEST(obj, FL_SINGLETON)) {
10733 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
10734 }
10735 // Continue to the shared T_CLASS/T_MODULE
10736 case T_MODULE:
10737 if (RCLASS_SUPER((VALUE)obj)) {
10738 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10739 }
10740 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10741 update_cc_tbl(objspace, obj);
10742 update_cvc_tbl(objspace, obj);
10743 update_superclasses(objspace, obj);
10744
10745 if (rb_shape_obj_too_complex(obj)) {
10746 gc_ref_update_table_values_only(objspace, RCLASS_IV_HASH(obj));
10747 }
10748 else {
10749 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10750 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10751 }
10752 }
10753
10754 update_class_ext(objspace, RCLASS_EXT(obj));
10755 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10756
10757 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
10758 break;
10759
10760 case T_ICLASS:
10761 if (RICLASS_OWNS_M_TBL_P(obj)) {
10762 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10763 }
10764 if (RCLASS_SUPER((VALUE)obj)) {
10765 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10766 }
10767 update_class_ext(objspace, RCLASS_EXT(obj));
10768 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10769 update_cc_tbl(objspace, obj);
10770 break;
10771
10772 case T_IMEMO:
10773 gc_ref_update_imemo(objspace, obj);
10774 return;
10775
10776 case T_NIL:
10777 case T_FIXNUM:
10778 case T_NODE:
10779 case T_MOVED:
10780 case T_NONE:
10781 /* These can't move */
10782 return;
10783
10784 case T_ARRAY:
10785 gc_ref_update_array(objspace, obj);
10786 break;
10787
10788 case T_HASH:
10789 gc_ref_update_hash(objspace, obj);
10790 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10791 break;
10792
10793 case T_STRING:
10794 {
10795 if (STR_SHARED_P(obj)) {
10796 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
10797 }
10798
10799            /* If, after the move, the string is not embedded but can fit in the
10800             * slot it's been placed in, then re-embed it. */
10801 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10802 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10803 rb_str_make_embedded(obj);
10804 }
10805 }
10806
10807 break;
10808 }
10809 case T_DATA:
10810 /* Call the compaction callback, if it exists */
10811 {
10812 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
10813 if (ptr) {
10814 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
10815 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
10816
10817 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
10818 VALUE *ref = (VALUE *)((char *)ptr + offset);
10819 if (SPECIAL_CONST_P(*ref)) continue;
10820 *ref = rb_gc_location(*ref);
10821 }
10822 }
10823 else if (RTYPEDDATA_P(obj)) {
10824 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
10825 if (compact_func) (*compact_func)(ptr);
10826 }
10827 }
10828 }
10829 break;
10830
10831 case T_OBJECT:
10832 gc_ref_update_object(objspace, obj);
10833 break;
10834
10835 case T_FILE:
10836 if (any->as.file.fptr) {
10837 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10838 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10839 UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
10840 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
10841 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
10842 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
10843 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
10844 }
10845 break;
10846 case T_REGEXP:
10847 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10848 break;
10849
10850 case T_SYMBOL:
10851 if (DYNAMIC_SYM_P((VALUE)any)) {
10852 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10853 }
10854 break;
10855
10856 case T_FLOAT:
10857 case T_BIGNUM:
10858 break;
10859
10860 case T_MATCH:
10861 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10862
10863 if (any->as.match.str) {
10864 UPDATE_IF_MOVED(objspace, any->as.match.str);
10865 }
10866 break;
10867
10868 case T_RATIONAL:
10869 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10870 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10871 break;
10872
10873 case T_COMPLEX:
10874 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10875 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10876
10877 break;
10878
10879 case T_STRUCT:
10880 {
10881 long i, len = RSTRUCT_LEN(obj);
10882 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10883
10884 for (i = 0; i < len; i++) {
10885 UPDATE_IF_MOVED(objspace, ptr[i]);
10886 }
10887 }
10888 break;
10889 default:
10890#if GC_DEBUG
10891 rb_gcdebug_print_obj_condition((VALUE)obj);
10892 rb_obj_info_dump(obj);
10893 rb_bug("unreachable");
10894#endif
10895 break;
10896
10897 }
10898
10899 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10900
10901 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10902}
10903
10904static int
10905gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
10906{
10907 VALUE v = (VALUE)vstart;
10908 asan_unlock_freelist(page);
10909 asan_lock_freelist(page);
10910 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
10911 page->flags.has_remembered_objects = FALSE;
10912
10913 /* For each object on the page */
10914 for (; v != (VALUE)vend; v += stride) {
10915 void *poisoned = asan_unpoison_object_temporary(v);
10916
10917 switch (BUILTIN_TYPE(v)) {
10918 case T_NONE:
10919 case T_MOVED:
10920 case T_ZOMBIE:
10921 break;
10922 default:
10923 if (RVALUE_WB_UNPROTECTED(v)) {
10924 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
10925 }
10926 if (RVALUE_REMEMBERED(v)) {
10927 page->flags.has_remembered_objects = TRUE;
10928 }
10929 if (page->flags.before_sweep) {
10930 if (RVALUE_MARKED(v)) {
10931 gc_update_object_references(objspace, v);
10932 }
10933 }
10934 else {
10935 gc_update_object_references(objspace, v);
10936 }
10937 }
10938
10939 if (poisoned) {
10940 asan_poison_object(v);
10941 }
10942 }
10943
10944 return 0;
10945}
10946
10947extern rb_symbols_t ruby_global_symbols;
10948#define global_symbols ruby_global_symbols
10949
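/*
 * gc_update_references() is the second half of compaction: once objects have
 * been copied and forwarding stubs installed, it walks every eden page of
 * every size pool and rewrites any reference that still points at a T_MOVED
 * slot (gc_ref_update / gc_update_object_references), then patches the VM
 * roots, the global symbol tables, the object-id mappings and the finalizer
 * table. Pages from the sweeping cursor onward are skipped when re-applying
 * mark bits, because they have not been swept yet.
 */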
10950static void
10951gc_update_references(rb_objspace_t *objspace)
10952{
10953 objspace->flags.during_reference_updating = true;
10954
10955 rb_execution_context_t *ec = GET_EC();
10956 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10957
10958 struct heap_page *page = NULL;
10959
10960 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10961 bool should_set_mark_bits = TRUE;
10962 rb_size_pool_t *size_pool = &size_pools[i];
10963 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10964
10965 ccan_list_for_each(&heap->pages, page, page_node) {
10966 uintptr_t start = (uintptr_t)page->start;
10967 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10968
10969 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10970 if (page == heap->sweeping_page) {
10971 should_set_mark_bits = FALSE;
10972 }
10973 if (should_set_mark_bits) {
10974 gc_setup_mark_bits(page);
10975 }
10976 }
10977 }
10978 rb_vm_update_references(vm);
10979 rb_gc_update_global_tbl();
10980 global_symbols.ids = rb_gc_location(global_symbols.ids);
10981 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10982 gc_ref_update_table_values_only(objspace, objspace->obj_to_id_tbl);
10983 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10984 gc_update_table_refs(objspace, global_symbols.str_sym);
10985 gc_update_table_refs(objspace, finalizer_table);
10986
10987 objspace->flags.during_reference_updating = false;
10988}
10989
10990#if GC_CAN_COMPILE_COMPACTION
10991/*
10992 * call-seq:
10993 * GC.latest_compact_info -> hash
10994 *
10995 *  Returns information about objects moved in the most recent \GC compaction.
10996 *
10997 *  The returned hash has two keys, :considered and :moved. The hash for
10998 * :considered lists the number of objects that were considered for movement
10999 * by the compactor, and the :moved hash lists the number of objects that
11000 * were actually moved. Some objects can't be moved (maybe they were pinned)
11001 * so these numbers can be used to calculate compaction efficiency.
11002 */
11003static VALUE
11004gc_compact_stats(VALUE self)
11005{
11006 size_t i;
11007 rb_objspace_t *objspace = &rb_objspace;
11008 VALUE h = rb_hash_new();
11009 VALUE considered = rb_hash_new();
11010 VALUE moved = rb_hash_new();
11011 VALUE moved_up = rb_hash_new();
11012 VALUE moved_down = rb_hash_new();
11013
11014 for (i=0; i<T_MASK; i++) {
11015 if (objspace->rcompactor.considered_count_table[i]) {
11016 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
11017 }
11018
11019 if (objspace->rcompactor.moved_count_table[i]) {
11020 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
11021 }
11022
11023 if (objspace->rcompactor.moved_up_count_table[i]) {
11024 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
11025 }
11026
11027 if (objspace->rcompactor.moved_down_count_table[i]) {
11028 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
11029 }
11030 }
11031
11032 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
11033 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
11034 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
11035 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
11036
11037 return h;
11038}
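
/*
 * As the documentation above notes, :considered and :moved can be combined
 * into a rough compaction-efficiency figure. An illustrative Ruby snippet
 * (not part of this file):
 *
 *     stats      = GC.latest_compact_info
 *     moved      = stats[:moved].values.sum
 *     considered = stats[:considered].values.sum
 *     ratio      = considered.zero? ? 0.0 : moved.fdiv(considered)
 */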
11039#else
11040# define gc_compact_stats rb_f_notimplement
11041#endif
11042
11043#if GC_CAN_COMPILE_COMPACTION
11044static void
11045root_obj_check_moved_i(const char *category, VALUE obj, void *data)
11046{
11047 if (gc_object_moved_p(&rb_objspace, obj)) {
11048 rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, obj_info(rb_gc_location(obj)));
11049 }
11050}
11051
11052static void
11053reachable_object_check_moved_i(VALUE ref, void *data)
11054{
11055 VALUE parent = (VALUE)data;
11056 if (gc_object_moved_p(&rb_objspace, ref)) {
11057 rb_bug("Object %s points to MOVED: %p -> %s", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
11058 }
11059}
11060
11061static int
11062heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
11063{
11064 VALUE v = (VALUE)vstart;
11065 for (; v != (VALUE)vend; v += stride) {
11066 if (gc_object_moved_p(&rb_objspace, v)) {
11067 /* Moved object still on the heap, something may have a reference. */
11068 }
11069 else {
11070 void *poisoned = asan_unpoison_object_temporary(v);
11071
11072 switch (BUILTIN_TYPE(v)) {
11073 case T_NONE:
11074 case T_ZOMBIE:
11075 break;
11076 default:
11077 if (!rb_objspace_garbage_object_p(v)) {
11078 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
11079 }
11080 }
11081
11082 if (poisoned) {
11083 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
11084 asan_poison_object(v);
11085 }
11086 }
11087 }
11088
11089 return 0;
11090}
11091
11092/*
11093 * call-seq:
11094 * GC.compact
11095 *
11096 * This function compacts objects together in Ruby's heap. It eliminates
11097 *  unused space (or fragmentation) in the heap by moving objects into that
11098 * unused space. This function returns a hash which contains statistics about
11099 *  which objects were moved. See <tt>GC.latest_compact_info</tt> for details about
11100 * compaction statistics.
11101 *
11102 * This method is implementation specific and not expected to be implemented
11103 * in any implementation besides MRI.
11104 *
11105 * To test whether \GC compaction is supported, use the idiom:
11106 *
11107 * GC.respond_to?(:compact)
11108 */
11109static VALUE
11110gc_compact(VALUE self)
11111{
11112 /* Run GC with compaction enabled */
11113 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
11114
11115 return gc_compact_stats(self);
11116}
11117#else
11118# define gc_compact rb_f_notimplement
11119#endif
11120
11121#if GC_CAN_COMPILE_COMPACTION
11122
11123struct desired_compaction_pages_i_data {
11124 rb_objspace_t *objspace;
11125 size_t required_slots[SIZE_POOL_COUNT];
11126};
11127
11128static int
11129desired_compaction_pages_i(struct heap_page *page, void *data)
11130{
11131 struct desired_compaction_pages_i_data *tdata = data;
11132 rb_objspace_t *objspace = tdata->objspace;
11133 VALUE vstart = (VALUE)page->start;
11134 VALUE vend = vstart + (VALUE)(page->total_slots * page->size_pool->slot_size);
11135
11136
11137 for (VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
11138 /* skip T_NONEs; they won't be moved */
11139 void *poisoned = asan_unpoison_object_temporary(v);
11140 if (BUILTIN_TYPE(v) == T_NONE) {
11141 if (poisoned) {
11142 asan_poison_object(v);
11143 }
11144 continue;
11145 }
11146
11147 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
11148 size_t dest_pool_idx = dest_pool - size_pools;
11149 tdata->required_slots[dest_pool_idx]++;
11150 }
11151
11152 return 0;
11153}
11154
11155static VALUE
11156gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE expand_heap, VALUE toward_empty)
11157{
11158 rb_objspace_t *objspace = &rb_objspace;
11159
11160 /* Clear the heap. */
11161 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qfalse);
11162
11163 if (RTEST(double_heap)) {
11164 rb_warn("double_heap is deprecated, please use expand_heap instead");
11165 }
11166
11167 RB_VM_LOCK_ENTER();
11168 {
11169 gc_rest(objspace);
11170
11171 /* if both double_heap and expand_heap are set, expand_heap takes precedence */
11172 if (RTEST(expand_heap)) {
11173 struct desired_compaction_pages_i_data desired_compaction = {
11174 .objspace = objspace,
11175 .required_slots = {0},
11176 };
11177 /* Work out how many objects want to be in each size pool, taking account of moves */
11178 objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
11179
11180 /* Find out which pool has the most pages */
11181 size_t max_existing_pages = 0;
11182            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11183 rb_size_pool_t *size_pool = &size_pools[i];
11184 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11185 max_existing_pages = MAX(max_existing_pages, heap->total_pages);
11186 }
11187 /* Add pages to each size pool so that compaction is guaranteed to move every object */
11188 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11189 rb_size_pool_t *size_pool = &size_pools[i];
11190 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11191
11192 size_t pages_to_add = 0;
11193 /*
11194 * Step 1: Make sure every pool has the same number of pages, by adding empty pages
11195 * to smaller pools. This is required to make sure the compact cursor can advance
11196 * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
11197 * compact cursors met" condition on some pools before fully compacting others
11198 */
11199 pages_to_add += max_existing_pages - heap->total_pages;
11200 /*
11201 * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
11202 * that want to be in that size pool, whether moved into it or moved within it
11203 */
11204 pages_to_add += slots_to_pages_for_size_pool(objspace, size_pool, desired_compaction.required_slots[i]);
11205 /*
11206 * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
11207 * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
11208 */
11209 pages_to_add += 2;
11210
11211 heap_add_pages(objspace, size_pool, heap, pages_to_add);
11212 }
11213 }
11214 else if (RTEST(double_heap)) {
11215 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11216 rb_size_pool_t *size_pool = &size_pools[i];
11217 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11218 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
11219 }
11220
11221 }
11222
11223 if (RTEST(toward_empty)) {
11224 objspace->rcompactor.compare_func = compare_free_slots;
11225 }
11226 }
11227 RB_VM_LOCK_LEAVE();
11228
11229 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
11230
11231 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
11232 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
11233
11234 objspace->rcompactor.compare_func = NULL;
11235 return gc_compact_stats(self);
11236}
11237#else
11238# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
11239#endif
11240
11241VALUE
11242rb_gc_start(void)
11243{
11244 rb_gc();
11245 return Qnil;
11246}
11247
11248void
11249rb_gc(void)
11250{
11251 unless_objspace(objspace) { return; }
11252 unsigned int reason = GPR_DEFAULT_REASON;
11253 garbage_collect(objspace, reason);
11254}
11255
11256int
11257rb_during_gc(void)
11258{
11259 unless_objspace(objspace) { return FALSE; }
11260 return during_gc;
11261}
11262
11263#if RGENGC_PROFILE >= 2
11264
11265static const char *type_name(int type, VALUE obj);
11266
11267static void
11268gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11269{
11270 VALUE result = rb_hash_new_with_size(T_MASK);
11271 int i;
11272 for (i=0; i<T_MASK; i++) {
11273 const char *type = type_name(i, 0);
11274 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
11275 }
11276 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
11277}
11278#endif
11279
11280size_t
11281rb_gc_count(void)
11282{
11283 return rb_objspace.profile.count;
11284}
11285
11286static VALUE
11287gc_count(rb_execution_context_t *ec, VALUE self)
11288{
11289 return SIZET2NUM(rb_gc_count());
11290}
11291
11292static VALUE
11293gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11294{
11295 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11296 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11297#if RGENGC_ESTIMATE_OLDMALLOC
11298 static VALUE sym_oldmalloc;
11299#endif
11300 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11301 static VALUE sym_none, sym_marking, sym_sweeping;
11302 static VALUE sym_weak_references_count, sym_retained_weak_references_count;
11303 VALUE hash = Qnil, key = Qnil;
11304 VALUE major_by, need_major_by;
11305 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11306
11307 if (SYMBOL_P(hash_or_key)) {
11308 key = hash_or_key;
11309 }
11310 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
11311 hash = hash_or_key;
11312 }
11313 else {
11314 rb_raise(rb_eTypeError, "non-hash or symbol given");
11315 }
11316
11317 if (NIL_P(sym_major_by)) {
11318#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11319 S(major_by);
11320 S(gc_by);
11321 S(immediate_sweep);
11322 S(have_finalizer);
11323 S(state);
11324 S(need_major_by);
11325
11326 S(stress);
11327 S(nofree);
11328 S(oldgen);
11329 S(shady);
11330 S(force);
11331#if RGENGC_ESTIMATE_OLDMALLOC
11332 S(oldmalloc);
11333#endif
11334 S(newobj);
11335 S(malloc);
11336 S(method);
11337 S(capi);
11338
11339 S(none);
11340 S(marking);
11341 S(sweeping);
11342
11343 S(weak_references_count);
11344 S(retained_weak_references_count);
11345#undef S
11346 }
11347
11348#define SET(name, attr) \
11349 if (key == sym_##name) \
11350 return (attr); \
11351 else if (hash != Qnil) \
11352 rb_hash_aset(hash, sym_##name, (attr));
11353
11354 major_by =
11355 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11356 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11357 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11358 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11359#if RGENGC_ESTIMATE_OLDMALLOC
11360 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11361#endif
11362 Qnil;
11363 SET(major_by, major_by);
11364
11365 if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
11366 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11367 need_major_by =
11368 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11369 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11370 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11371 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11372#if RGENGC_ESTIMATE_OLDMALLOC
11373 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11374#endif
11375 Qnil;
11376 SET(need_major_by, need_major_by);
11377 }
11378
11379 SET(gc_by,
11380 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11381 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11382 (flags & GPR_FLAG_METHOD) ? sym_method :
11383 (flags & GPR_FLAG_CAPI) ? sym_capi :
11384 (flags & GPR_FLAG_STRESS) ? sym_stress :
11385 Qnil
11386 );
11387
11388 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11389 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11390
11391 if (orig_flags == 0) {
11392 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11393 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11394 }
11395
11396 SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
11397 SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));
11398#undef SET
11399
11400 if (!NIL_P(key)) {/* matched key should return above */
11401 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11402 }
11403
11404 return hash;
11405}
11406
11407VALUE
11408rb_gc_latest_gc_info(VALUE key)
11409{
11410 rb_objspace_t *objspace = &rb_objspace;
11411 return gc_info_decode(objspace, key, 0);
11412}
11413
11414static VALUE
11415gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
11416{
11417 rb_objspace_t *objspace = &rb_objspace;
11418
11419 if (NIL_P(arg)) {
11420 arg = rb_hash_new();
11421 }
11422 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
11423 rb_raise(rb_eTypeError, "non-hash or symbol given");
11424 }
11425
11426 return gc_info_decode(objspace, arg, 0);
11427}
11428
11429enum gc_stat_sym {
11430 gc_stat_sym_count,
11431 gc_stat_sym_time,
11432 gc_stat_sym_marking_time,
11433 gc_stat_sym_sweeping_time,
11434 gc_stat_sym_heap_allocated_pages,
11435 gc_stat_sym_heap_sorted_length,
11436 gc_stat_sym_heap_allocatable_pages,
11437 gc_stat_sym_heap_available_slots,
11438 gc_stat_sym_heap_live_slots,
11439 gc_stat_sym_heap_free_slots,
11440 gc_stat_sym_heap_final_slots,
11441 gc_stat_sym_heap_marked_slots,
11442 gc_stat_sym_heap_eden_pages,
11443 gc_stat_sym_heap_tomb_pages,
11444 gc_stat_sym_total_allocated_pages,
11445 gc_stat_sym_total_freed_pages,
11446 gc_stat_sym_total_allocated_objects,
11447 gc_stat_sym_total_freed_objects,
11448 gc_stat_sym_malloc_increase_bytes,
11449 gc_stat_sym_malloc_increase_bytes_limit,
11450 gc_stat_sym_minor_gc_count,
11451 gc_stat_sym_major_gc_count,
11452 gc_stat_sym_compact_count,
11453 gc_stat_sym_read_barrier_faults,
11454 gc_stat_sym_total_moved_objects,
11455 gc_stat_sym_remembered_wb_unprotected_objects,
11456 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11457 gc_stat_sym_old_objects,
11458 gc_stat_sym_old_objects_limit,
11459#if RGENGC_ESTIMATE_OLDMALLOC
11460 gc_stat_sym_oldmalloc_increase_bytes,
11461 gc_stat_sym_oldmalloc_increase_bytes_limit,
11462#endif
11463 gc_stat_sym_weak_references_count,
11464#if RGENGC_PROFILE
11465 gc_stat_sym_total_generated_normal_object_count,
11466 gc_stat_sym_total_generated_shady_object_count,
11467 gc_stat_sym_total_shade_operation_count,
11468 gc_stat_sym_total_promoted_count,
11469 gc_stat_sym_total_remembered_normal_object_count,
11470 gc_stat_sym_total_remembered_shady_object_count,
11471#endif
11472 gc_stat_sym_last
11473};
11474
11475static VALUE gc_stat_symbols[gc_stat_sym_last];
11476
11477static void
11478setup_gc_stat_symbols(void)
11479{
11480 if (gc_stat_symbols[0] == 0) {
11481#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11482 S(count);
11483 S(time);
11484        S(marking_time);
11485        S(sweeping_time);
11486 S(heap_allocated_pages);
11487 S(heap_sorted_length);
11488 S(heap_allocatable_pages);
11489 S(heap_available_slots);
11490 S(heap_live_slots);
11491 S(heap_free_slots);
11492 S(heap_final_slots);
11493 S(heap_marked_slots);
11494 S(heap_eden_pages);
11495 S(heap_tomb_pages);
11496 S(total_allocated_pages);
11497 S(total_freed_pages);
11498 S(total_allocated_objects);
11499 S(total_freed_objects);
11500 S(malloc_increase_bytes);
11501 S(malloc_increase_bytes_limit);
11502 S(minor_gc_count);
11503 S(major_gc_count);
11504 S(compact_count);
11505 S(read_barrier_faults);
11506 S(total_moved_objects);
11507 S(remembered_wb_unprotected_objects);
11508 S(remembered_wb_unprotected_objects_limit);
11509 S(old_objects);
11510 S(old_objects_limit);
11511#if RGENGC_ESTIMATE_OLDMALLOC
11512 S(oldmalloc_increase_bytes);
11513 S(oldmalloc_increase_bytes_limit);
11514#endif
11515 S(weak_references_count);
11516#if RGENGC_PROFILE
11517 S(total_generated_normal_object_count);
11518 S(total_generated_shady_object_count);
11519 S(total_shade_operation_count);
11520 S(total_promoted_count);
11521 S(total_remembered_normal_object_count);
11522 S(total_remembered_shady_object_count);
11523#endif /* RGENGC_PROFILE */
11524#undef S
11525 }
11526}
11527
11528static uint64_t
11529ns_to_ms(uint64_t ns)
11530{
11531 return ns / (1000 * 1000);
11532}
11533
11534static size_t
11535gc_stat_internal(VALUE hash_or_sym)
11536{
11537 rb_objspace_t *objspace = &rb_objspace;
11538 VALUE hash = Qnil, key = Qnil;
11539
11540 setup_gc_stat_symbols();
11541
11542 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11543 hash = hash_or_sym;
11544 }
11545 else if (SYMBOL_P(hash_or_sym)) {
11546 key = hash_or_sym;
11547 }
11548 else {
11549 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11550 }
11551
11552#define SET(name, attr) \
11553 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11554 return attr; \
11555 else if (hash != Qnil) \
11556 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11557
11558 SET(count, objspace->profile.count);
11559 SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns)); // TODO: UINT64T2NUM
11560 SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
11561 SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
11562
11563 /* implementation dependent counters */
11564 SET(heap_allocated_pages, heap_allocated_pages);
11565 SET(heap_sorted_length, heap_pages_sorted_length);
11566 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11567 SET(heap_available_slots, objspace_available_slots(objspace));
11568 SET(heap_live_slots, objspace_live_slots(objspace));
11569 SET(heap_free_slots, objspace_free_slots(objspace));
11570 SET(heap_final_slots, heap_pages_final_slots);
11571 SET(heap_marked_slots, objspace->marked_slots);
11572 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11573 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11574 SET(total_allocated_pages, total_allocated_pages(objspace));
11575 SET(total_freed_pages, total_freed_pages(objspace));
11576 SET(total_allocated_objects, total_allocated_objects(objspace));
11577 SET(total_freed_objects, total_freed_objects(objspace));
11578 SET(malloc_increase_bytes, malloc_increase);
11579 SET(malloc_increase_bytes_limit, malloc_limit);
11580 SET(minor_gc_count, objspace->profile.minor_gc_count);
11581 SET(major_gc_count, objspace->profile.major_gc_count);
11582 SET(compact_count, objspace->profile.compact_count);
11583 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11584 SET(total_moved_objects, objspace->rcompactor.total_moved);
11585 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11586 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11587 SET(old_objects, objspace->rgengc.old_objects);
11588 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11589#if RGENGC_ESTIMATE_OLDMALLOC
11590 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11591 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11592#endif
11593
11594#if RGENGC_PROFILE
11595 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11596 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11597 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11598 SET(total_promoted_count, objspace->profile.total_promoted_count);
11599 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11600 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11601#endif /* RGENGC_PROFILE */
11602#undef SET
11603
11604 if (!NIL_P(key)) { /* matched key should return above */
11605 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11606 }
11607
11608#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11609 if (hash != Qnil) {
11610 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11611 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11612 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11613 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11614 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11615 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11616 }
11617#endif
11618
11619 return 0;
11620}
11621
11622static VALUE
11623gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
11624{
11625 if (NIL_P(arg)) {
11626 arg = rb_hash_new();
11627 }
11628 else if (SYMBOL_P(arg)) {
11629 size_t value = gc_stat_internal(arg);
11630 return SIZET2NUM(value);
11631 }
11632 else if (RB_TYPE_P(arg, T_HASH)) {
11633 // ok
11634 }
11635 else {
11636 rb_raise(rb_eTypeError, "non-hash or symbol given");
11637 }
11638
11639 gc_stat_internal(arg);
11640 return arg;
11641}
11642
11643size_t
11644rb_gc_stat(VALUE key)
11645{
11646 if (SYMBOL_P(key)) {
11647 size_t value = gc_stat_internal(key);
11648 return value;
11649 }
11650 else {
11651 gc_stat_internal(key);
11652 return 0;
11653 }
11654}
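
/*
 * Illustrative sketch: rb_gc_stat() above is the C-level counterpart of
 * GC.stat. Called with a Symbol it returns that single counter; called with a
 * Hash it fills the hash in and returns 0. The helper name is made up for the
 * example.
 */
static size_t
example_minor_gc_count(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("minor_gc_count")));
}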
11655
11656
11657enum gc_stat_heap_sym {
11658 gc_stat_heap_sym_slot_size,
11659 gc_stat_heap_sym_heap_allocatable_pages,
11660 gc_stat_heap_sym_heap_eden_pages,
11661 gc_stat_heap_sym_heap_eden_slots,
11662 gc_stat_heap_sym_heap_tomb_pages,
11663 gc_stat_heap_sym_heap_tomb_slots,
11664 gc_stat_heap_sym_total_allocated_pages,
11665 gc_stat_heap_sym_total_freed_pages,
11666 gc_stat_heap_sym_force_major_gc_count,
11667 gc_stat_heap_sym_force_incremental_marking_finish_count,
11668 gc_stat_heap_sym_total_allocated_objects,
11669 gc_stat_heap_sym_total_freed_objects,
11670 gc_stat_heap_sym_last
11671};
11672
11673static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11674
11675static void
11676setup_gc_stat_heap_symbols(void)
11677{
11678 if (gc_stat_heap_symbols[0] == 0) {
11679#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11680 S(slot_size);
11681 S(heap_allocatable_pages);
11682 S(heap_eden_pages);
11683 S(heap_eden_slots);
11684 S(heap_tomb_pages);
11685 S(heap_tomb_slots);
11686 S(total_allocated_pages);
11687 S(total_freed_pages);
11688 S(force_major_gc_count);
11689 S(force_incremental_marking_finish_count);
11690 S(total_allocated_objects);
11691 S(total_freed_objects);
11692#undef S
11693 }
11694}
11695
11696static size_t
11697gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11698{
11699 rb_objspace_t *objspace = &rb_objspace;
11700 VALUE hash = Qnil, key = Qnil;
11701
11702 setup_gc_stat_heap_symbols();
11703
11704 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11705 hash = hash_or_sym;
11706 }
11707 else if (SYMBOL_P(hash_or_sym)) {
11708 key = hash_or_sym;
11709 }
11710 else {
11711 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11712 }
11713
11714 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11715 rb_raise(rb_eArgError, "size pool index out of range");
11716 }
11717
11718 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
11719
11720#define SET(name, attr) \
11721 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11722 return attr; \
11723 else if (hash != Qnil) \
11724 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11725
11726 SET(slot_size, size_pool->slot_size);
11727 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11728 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11729 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11730 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11731 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11732 SET(total_allocated_pages, size_pool->total_allocated_pages);
11733 SET(total_freed_pages, size_pool->total_freed_pages);
11734 SET(force_major_gc_count, size_pool->force_major_gc_count);
11735 SET(force_incremental_marking_finish_count, size_pool->force_incremental_marking_finish_count);
11736 SET(total_allocated_objects, size_pool->total_allocated_objects);
11737 SET(total_freed_objects, size_pool->total_freed_objects);
11738#undef SET
11739
11740 if (!NIL_P(key)) { /* matched key should return above */
11741 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11742 }
11743
11744 return 0;
11745}
11746
11747static VALUE
11748gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
11749{
11750 if (NIL_P(heap_name)) {
11751 if (NIL_P(arg)) {
11752 arg = rb_hash_new();
11753 }
11754 else if (RB_TYPE_P(arg, T_HASH)) {
11755 // ok
11756 }
11757 else {
11758 rb_raise(rb_eTypeError, "non-hash given");
11759 }
11760
11761 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11762 VALUE hash = rb_hash_aref(arg, INT2FIX(i));
11763 if (NIL_P(hash)) {
11764 hash = rb_hash_new();
11765 rb_hash_aset(arg, INT2FIX(i), hash);
11766 }
11767 gc_stat_heap_internal(i, hash);
11768 }
11769 }
11770 else if (FIXNUM_P(heap_name)) {
11771 int size_pool_idx = FIX2INT(heap_name);
11772
11773 if (NIL_P(arg)) {
11774 arg = rb_hash_new();
11775 }
11776 else if (SYMBOL_P(arg)) {
11777 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11778 return SIZET2NUM(value);
11779 }
11780 else if (RB_TYPE_P(arg, T_HASH)) {
11781 // ok
11782 }
11783 else {
11784 rb_raise(rb_eTypeError, "non-hash or symbol given");
11785 }
11786
11787 gc_stat_heap_internal(size_pool_idx, arg);
11788 }
11789 else {
11790 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
11791 }
11792
11793 return arg;
11794}
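
/*
 * Usage note (illustrative): this is the implementation behind GC.stat_heap.
 * GC.stat_heap(0, :slot_size) returns a single value for size pool 0, while a
 * nil heap_name yields a Hash keyed by size pool index (0...SIZE_POOL_COUNT)
 * whose values are per-pool Hashes of the keys registered in
 * setup_gc_stat_heap_symbols().
 */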
11795
11796static VALUE
11797gc_stress_get(rb_execution_context_t *ec, VALUE self)
11798{
11799 rb_objspace_t *objspace = &rb_objspace;
11800 return ruby_gc_stress_mode;
11801}
11802
11803static void
11804gc_stress_set(rb_objspace_t *objspace, VALUE flag)
11805{
11806 objspace->flags.gc_stressful = RTEST(flag);
11807 objspace->gc_stress_mode = flag;
11808}
11809
11810static VALUE
11811gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
11812{
11813 rb_objspace_t *objspace = &rb_objspace;
11814 gc_stress_set(objspace, flag);
11815 return flag;
11816}
11817
11818VALUE
11819rb_gc_enable(void)
11820{
11821 rb_objspace_t *objspace = &rb_objspace;
11822 return rb_objspace_gc_enable(objspace);
11823}
11824
11825VALUE
11826rb_objspace_gc_enable(rb_objspace_t *objspace)
11827{
11828 int old = dont_gc_val();
11829
11830 dont_gc_off();
11831 return RBOOL(old);
11832}
11833
11834static VALUE
11835gc_enable(rb_execution_context_t *ec, VALUE _)
11836{
11837 return rb_gc_enable();
11838}
11839
11840VALUE
11841rb_gc_disable_no_rest(void)
11842{
11843 rb_objspace_t *objspace = &rb_objspace;
11844 return gc_disable_no_rest(objspace);
11845}
11846
11847static VALUE
11848gc_disable_no_rest(rb_objspace_t *objspace)
11849{
11850 int old = dont_gc_val();
11851 dont_gc_on();
11852 return RBOOL(old);
11853}
11854
11855VALUE
11856rb_gc_disable(void)
11857{
11858 rb_objspace_t *objspace = &rb_objspace;
11859 return rb_objspace_gc_disable(objspace);
11860}
11861
11862VALUE
11863rb_objspace_gc_disable(rb_objspace_t *objspace)
11864{
11865 gc_rest(objspace);
11866 return gc_disable_no_rest(objspace);
11867}
11868
11869static VALUE
11870gc_disable(rb_execution_context_t *ec, VALUE _)
11871{
11872 return rb_gc_disable();
11873}
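
/*
 * Illustrative sketch: rb_gc_disable()/rb_gc_enable() above can bracket a
 * C-level critical section that must not trigger GC. Both return the previous
 * "disabled" state, so the caller can restore it; the helper name is made up
 * for the example.
 */
static void
example_without_gc(void (*critical)(void *), void *arg)
{
    VALUE was_disabled = rb_gc_disable(); /* finishes any pending GC work first */
    critical(arg);
    if (!RTEST(was_disabled)) rb_gc_enable();
}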
11874
11875#if GC_CAN_COMPILE_COMPACTION
11876/*
11877 * call-seq:
11878 * GC.auto_compact = flag
11879 *
11880 * Updates automatic compaction mode.
11881 *
11882 * When enabled, the compactor will execute on every major collection.
11883 *
11884 * Enabling compaction will degrade performance on major collections.
11885 */
11886static VALUE
11887gc_set_auto_compact(VALUE _, VALUE v)
11888{
11889 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11890
11891 ruby_enable_autocompact = RTEST(v);
11892
11893#if RGENGC_CHECK_MODE
11894 ruby_autocompact_compare_func = NULL;
11895
11896 if (SYMBOL_P(v)) {
11897 ID id = RB_SYM2ID(v);
11898 if (id == rb_intern("empty")) {
11899 ruby_autocompact_compare_func = compare_free_slots;
11900 }
11901 }
11902#endif
11903
11904 return v;
11905}
11906#else
11907# define gc_set_auto_compact rb_f_notimplement
11908#endif
11909
11910#if GC_CAN_COMPILE_COMPACTION
11911/*
11912 * call-seq:
11913 * GC.auto_compact -> true or false
11914 *
11915 * Returns whether or not automatic compaction has been enabled.
11916 */
11917static VALUE
11918gc_get_auto_compact(VALUE _)
11919{
11920 return RBOOL(ruby_enable_autocompact);
11921}
11922#else
11923# define gc_get_auto_compact rb_f_notimplement
11924#endif
11925
11926static int
11927get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11928{
11929 const char *ptr = getenv(name);
11930 ssize_t val;
11931
11932 if (ptr != NULL && *ptr) {
11933 size_t unit = 0;
11934 char *end;
11935#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11936 val = strtoll(ptr, &end, 0);
11937#else
11938 val = strtol(ptr, &end, 0);
11939#endif
11940 switch (*end) {
11941 case 'k': case 'K':
11942 unit = 1024;
11943 ++end;
11944 break;
11945 case 'm': case 'M':
11946 unit = 1024*1024;
11947 ++end;
11948 break;
11949 case 'g': case 'G':
11950 unit = 1024*1024*1024;
11951 ++end;
11952 break;
11953 }
11954 while (*end && isspace((unsigned char)*end)) end++;
11955 if (*end) {
11956 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11957 return 0;
11958 }
11959 if (unit > 0) {
11960 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11961 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11962 return 0;
11963 }
11964 val *= unit;
11965 }
11966 if (val > 0 && (size_t)val > lower_bound) {
11967 if (RTEST(ruby_verbose)) {
11968 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11969 }
11970 *default_value = (size_t)val;
11971 return 1;
11972 }
11973 else {
11974 if (RTEST(ruby_verbose)) {
11975 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11976 name, val, *default_value, lower_bound);
11977 }
11978 return 0;
11979 }
11980 }
11981 return 0;
11982}
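
/*
 * Usage note (illustrative): get_envparam_size() accepts a plain integer plus
 * an optional k/K, m/M or g/G binary suffix, parsed with base 0, e.g.
 *
 *   RUBY_GC_MALLOC_LIMIT=16000000    -> 16000000 bytes
 *   RUBY_GC_MALLOC_LIMIT=128M        -> 128 * 1024 * 1024 bytes
 *   RUBY_GC_MALLOC_LIMIT=0x1000000   -> hexadecimal also parses
 *
 * Values with trailing garbage, values that would overflow, and values not
 * greater than lower_bound are rejected and the default is kept.
 */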
11983
11984static int
11985get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11986{
11987 const char *ptr = getenv(name);
11988 double val;
11989
11990 if (ptr != NULL && *ptr) {
11991 char *end;
11992 val = strtod(ptr, &end);
11993 if (!*ptr || *end) {
11994 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11995 return 0;
11996 }
11997
11998 if (accept_zero && val == 0.0) {
11999 goto accept;
12000 }
12001 else if (val <= lower_bound) {
12002 if (RTEST(ruby_verbose)) {
12003 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
12004 name, val, *default_value, lower_bound);
12005 }
12006 }
12007 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
12008 val > upper_bound) {
12009 if (RTEST(ruby_verbose)) {
12010 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
12011 name, val, *default_value, upper_bound);
12012 }
12013 }
12014 else {
12015 goto accept;
12016 }
12017 }
12018 return 0;
12019
12020 accept:
12021 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
12022 *default_value = val;
12023 return 1;
12024}
12025
12026static void
12027gc_set_initial_pages(rb_objspace_t *objspace)
12028{
12029 gc_rest(objspace);
12030
12031 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
12032 rb_size_pool_t *size_pool = &size_pools[i];
12033 char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
12034 snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);
12035
12036 size_t size_pool_init_slots = gc_params.size_pool_init_slots[i];
12037 if (get_envparam_size(env_key, &size_pool_init_slots, 0)) {
12038 gc_params.size_pool_init_slots[i] = size_pool_init_slots;
12039 }
12040
12041 if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
12042 size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
12043 size_pool->allocatable_pages = slots_to_pages_for_size_pool(objspace, size_pool, slots);
12044 }
12045 else {
12046 /* We already have more slots than size_pool_init_slots allows, so
12047 * prevent creating more pages. */
12048 size_pool->allocatable_pages = 0;
12049 }
12050 }
12051 heap_pages_expand_sorted(objspace);
12052}
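
/*
 * Usage note (illustrative): the env_key built above is per size pool, e.g.
 * RUBY_GC_HEAP_0_INIT_SLOTS=600000 requests roughly 600000 slots for size
 * pool 0 at boot. Pools that already have more slots than requested get no
 * additional allocatable pages.
 */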
12053
12054/*
12055 * GC tuning environment variables
12056 *
12057 * * RUBY_GC_HEAP_FREE_SLOTS
12058 * - Prepare at least this amount of slots after GC.
12059 * - Allocate slots if there are not enough slots.
12060 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
12061 * - Allocate slots by this factor.
12062 * - (next slots number) = (current slots number) * (this factor)
12063 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
12064 * - Allocation rate is limited to this number of slots.
12065 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
12066 * - Allocate additional pages when the number of free slots is
12067 * lower than the value (total_slots * (this ratio)).
12068 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
12069 * - Allocate slots to satisfy this formula:
12070 * free_slots = total_slots * goal_ratio
12071 * - In other words, prepare (total_slots * goal_ratio) free slots.
12072 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
12073 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
12074 * - Allow pages to be freed when the number of free slots is
12075 * greater than the value (total_slots * (this ratio)).
12076 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
12077 * - Do full GC when the number of old objects is more than R * N
12078 * where R is this factor and
12079 * N is the number of old objects just after last full GC.
12080 *
12081 * * obsolete
12082 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
12083 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
12084 *
12085 * * RUBY_GC_MALLOC_LIMIT
12086 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
12087 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
12088 *
12089 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
12090 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
12091 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
12092 */
12093
12094void
12095ruby_gc_set_params(void)
12096{
12097 rb_objspace_t *objspace = &rb_objspace;
12098 /* RUBY_GC_HEAP_FREE_SLOTS */
12099 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
12100 /* ok */
12101 }
12102
12103 gc_set_initial_pages(objspace);
12104
12105 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
12106 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
12107 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
12108 0.0, 1.0, FALSE);
12109 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
12110 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
12111 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
12112 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
12113 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
12114 get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
12115
12116 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
12117 malloc_limit = gc_params.malloc_limit_min;
12118 }
12119 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
12120 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
12121 gc_params.malloc_limit_max = SIZE_MAX;
12122 }
12123 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
12124
12125#if RGENGC_ESTIMATE_OLDMALLOC
12126 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
12127 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
12128 }
12129 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
12130 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
12131#endif
12132}
12133
12134static void
12135reachable_objects_from_callback(VALUE obj)
12136{
12137 rb_ractor_t *cr = GET_RACTOR();
12138 cr->mfd->mark_func(obj, cr->mfd->data);
12139}
12140
12141void
12142rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
12143{
12144 rb_objspace_t *objspace = &rb_objspace;
12145
12146 RB_VM_LOCK_ENTER();
12147 {
12148 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
12149
12150 if (is_markable_object(obj)) {
12151 rb_ractor_t *cr = GET_RACTOR();
12152 struct gc_mark_func_data_struct mfd = {
12153 .mark_func = func,
12154 .data = data,
12155 }, *prev_mfd = cr->mfd;
12156
12157 cr->mfd = &mfd;
12158 gc_mark_children(objspace, obj);
12159 cr->mfd = prev_mfd;
12160 }
12161 }
12162 RB_VM_LOCK_LEAVE();
12163}
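
/*
 * Illustrative sketch: rb_objspace_reachable_objects_from() above invokes the
 * callback for each object reference traced from obj (the same child can be
 * reported more than once); ext/objspace builds ObjectSpace.reachable_objects_from
 * on top of it. The helper names are made up for the example.
 */
static void
example_count_edge(VALUE child, void *data)
{
    (*(size_t *)data)++;
}

static size_t
example_reachable_count(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, example_count_edge, &n);
    return n;
}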
12164
12165struct root_objects_data {
12166    const char *category;
12167 void (*func)(const char *category, VALUE, void *);
12168 void *data;
12169};
12170
12171static void
12172root_objects_from(VALUE obj, void *ptr)
12173{
12174 const struct root_objects_data *data = (struct root_objects_data *)ptr;
12175 (*data->func)(data->category, obj, data->data);
12176}
12177
12178void
12179rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
12180{
12181 rb_objspace_t *objspace = &rb_objspace;
12182 objspace_reachable_objects_from_root(objspace, func, passing_data);
12183}
12184
12185static void
12186objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
12187{
12188 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
12189
12190 rb_ractor_t *cr = GET_RACTOR();
12191 struct root_objects_data data = {
12192 .func = func,
12193 .data = passing_data,
12194 };
12195 struct gc_mark_func_data_struct mfd = {
12196 .mark_func = root_objects_from,
12197 .data = &data,
12198 }, *prev_mfd = cr->mfd;
12199
12200 cr->mfd = &mfd;
12201 gc_mark_roots(objspace, &data.category);
12202 cr->mfd = prev_mfd;
12203}
12204
12205/*
12206 ------------------------ Extended allocator ------------------------
12207*/
12208
12209struct gc_raise_tag {
12210    VALUE exc;
12211 const char *fmt;
12212 va_list *ap;
12213};
12214
12215static void *
12216gc_vraise(void *ptr)
12217{
12218 struct gc_raise_tag *argv = ptr;
12219 rb_vraise(argv->exc, argv->fmt, *argv->ap);
12220 UNREACHABLE_RETURN(NULL);
12221}
12222
12223static void
12224gc_raise(VALUE exc, const char *fmt, ...)
12225{
12226 va_list ap;
12227 va_start(ap, fmt);
12228 struct gc_raise_tag argv = {
12229 exc, fmt, &ap,
12230 };
12231
12232 if (ruby_thread_has_gvl_p()) {
12233        gc_vraise(&argv);
12234        UNREACHABLE;
12235 }
12236 else if (ruby_native_thread_p()) {
12237        rb_thread_call_with_gvl(gc_vraise, &argv);
12238        UNREACHABLE;
12239 }
12240 else {
12241 /* Not in a ruby thread */
12242 fprintf(stderr, "%s", "[FATAL] ");
12243 vfprintf(stderr, fmt, ap);
12244 }
12245
12246 va_end(ap);
12247 abort();
12248}
12249
12250static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
12251
12252static void
12253negative_size_allocation_error(const char *msg)
12254{
12255 gc_raise(rb_eNoMemError, "%s", msg);
12256}
12257
12258static void *
12259ruby_memerror_body(void *dummy)
12260{
12261 rb_memerror();
12262 return 0;
12263}
12264
12265NORETURN(static void ruby_memerror(void));
12267static void
12268ruby_memerror(void)
12269{
12270 if (ruby_thread_has_gvl_p()) {
12271 rb_memerror();
12272 }
12273 else {
12274 if (ruby_native_thread_p()) {
12275 rb_thread_call_with_gvl(ruby_memerror_body, 0);
12276 }
12277 else {
12278 /* no ruby thread */
12279 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12280 }
12281 }
12282 exit(EXIT_FAILURE);
12283}
12284
12285void
12286rb_memerror(void)
12287{
12288 rb_execution_context_t *ec = GET_EC();
12289 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
12290 VALUE exc;
12291
12292 if (0) {
12293 // Print out pid, sleep, so you can attach debugger to see what went wrong:
12294 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
12295 sleep(60);
12296 }
12297
12298 if (during_gc) {
12299 // TODO: OMG!! How to implement it?
12300 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12301 }
12302
12303 exc = nomem_error;
12304 if (!exc ||
12305 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12306 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12307 exit(EXIT_FAILURE);
12308 }
12309 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12310 rb_ec_raised_clear(ec);
12311 }
12312 else {
12313 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12314 exc = ruby_vm_special_exception_copy(exc);
12315 }
12316 ec->errinfo = exc;
12317 EC_JUMP_TAG(ec, TAG_RAISE);
12318}
12319
12320void *
12321rb_aligned_malloc(size_t alignment, size_t size)
12322{
12323 /* alignment must be a power of 2 */
12324 GC_ASSERT(((alignment - 1) & alignment) == 0);
12325 GC_ASSERT(alignment % sizeof(void*) == 0);
12326
12327 void *res;
12328
12329#if defined __MINGW32__
12330 res = __mingw_aligned_malloc(size, alignment);
12331#elif defined _WIN32
12332 void *_aligned_malloc(size_t, size_t);
12333 res = _aligned_malloc(size, alignment);
12334#elif defined(HAVE_POSIX_MEMALIGN)
12335 if (posix_memalign(&res, alignment, size) != 0) {
12336 return NULL;
12337 }
12338#elif defined(HAVE_MEMALIGN)
12339 res = memalign(alignment, size);
12340#else
12341 char* aligned;
12342 res = malloc(alignment + size + sizeof(void*));
12343 aligned = (char*)res + alignment + sizeof(void*);
12344 aligned -= ((VALUE)aligned & (alignment - 1));
12345 ((void**)aligned)[-1] = res;
12346 res = (void*)aligned;
12347#endif
12348
12349 GC_ASSERT((uintptr_t)res % alignment == 0);
12350
12351 return res;
12352}
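
/*
 * Illustrative sketch: rb_aligned_malloc() and rb_aligned_free() must be used
 * as a pair, because the generic fallback branch stashes the original malloc()
 * pointer just before the aligned region. The sizes below are made up for the
 * example; in this file the pair backs heap page allocation.
 */
static void
example_aligned_roundtrip(void)
{
    /* alignment must be a power of two and a multiple of sizeof(void *) */
    void *block = rb_aligned_malloc(4096, 4 * 4096);
    if (block != NULL) {
        /* ... use block ... */
        rb_aligned_free(block, 4 * 4096);
    }
}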
12353
12354static void
12355rb_aligned_free(void *ptr, size_t size)
12356{
12357#if defined __MINGW32__
12358 __mingw_aligned_free(ptr);
12359#elif defined _WIN32
12360 _aligned_free(ptr);
12361#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12362 free(ptr);
12363#else
12364 free(((void**)ptr)[-1]);
12365#endif
12366}
12367
12368static inline size_t
12369objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12370{
12371#ifdef HAVE_MALLOC_USABLE_SIZE
12372 return malloc_usable_size(ptr);
12373#else
12374 return hint;
12375#endif
12376}
12377
12378enum memop_type {
12379 MEMOP_TYPE_MALLOC = 0,
12380 MEMOP_TYPE_FREE,
12381 MEMOP_TYPE_REALLOC
12382};
12383
12384static inline void
12385atomic_sub_nounderflow(size_t *var, size_t sub)
12386{
12387 if (sub == 0) return;
12388
12389 while (1) {
12390 size_t val = *var;
12391 if (val < sub) sub = val;
12392 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
12393 }
12394}
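
/*
 * Behavior note (illustrative): atomic_sub_nounderflow() clamps instead of
 * wrapping. For example, with *var == 3 and sub == 10 the CAS loop stores 0;
 * if another thread updates *var between the read and the CAS, the loop simply
 * retries with the fresh value.
 */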
12395
12396static void
12397objspace_malloc_gc_stress(rb_objspace_t *objspace)
12398{
12399 if (ruby_gc_stressful && ruby_native_thread_p()) {
12400 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12401 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12402
12403 if (gc_stress_full_mark_after_malloc_p()) {
12404 reason |= GPR_FLAG_FULL_MARK;
12405 }
12406 garbage_collect_with_gvl(objspace, reason);
12407 }
12408}
12409
12410static inline bool
12411objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12412{
12413 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12414 mem,
12415 type == MEMOP_TYPE_MALLOC ? "malloc" :
12416 type == MEMOP_TYPE_FREE ? "free " :
12417 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
12418 new_size, old_size);
12419 return false;
12420}
12421
12422static bool
12423objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12424{
12425 if (new_size > old_size) {
12426 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12427#if RGENGC_ESTIMATE_OLDMALLOC
12428 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12429#endif
12430 }
12431 else {
12432 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12433#if RGENGC_ESTIMATE_OLDMALLOC
12434 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12435#endif
12436 }
12437
12438 if (type == MEMOP_TYPE_MALLOC) {
12439 retry:
12440 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
12441 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12442 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
12443 goto retry;
12444 }
12445 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12446 }
12447 }
12448
12449#if MALLOC_ALLOCATED_SIZE
12450 if (new_size >= old_size) {
12451 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12452 }
12453 else {
12454 size_t dec_size = old_size - new_size;
12455 size_t allocated_size = objspace->malloc_params.allocated_size;
12456
12457#if MALLOC_ALLOCATED_SIZE_CHECK
12458 if (allocated_size < dec_size) {
12459 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12460 }
12461#endif
12462 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12463 }
12464
12465 switch (type) {
12466 case MEMOP_TYPE_MALLOC:
12467 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12468 break;
12469 case MEMOP_TYPE_FREE:
12470 {
12471 size_t allocations = objspace->malloc_params.allocations;
12472 if (allocations > 0) {
12473 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12474 }
12475#if MALLOC_ALLOCATED_SIZE_CHECK
12476 else {
12477 GC_ASSERT(objspace->malloc_params.allocations > 0);
12478 }
12479#endif
12480 }
12481 break;
12482 case MEMOP_TYPE_REALLOC: /* ignore */ break;
12483 }
12484#endif
12485 return true;
12486}
12487
12488#define objspace_malloc_increase(...) \
12489 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12490 !malloc_increase_done; \
12491 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
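
/*
 * Usage note (illustrative): objspace_malloc_increase() is a for-loop macro,
 * so it works both as a plain statement and with a trailing block; the block
 * runs after the (no-op) report call and before the real accounting in
 * objspace_malloc_increase_body(), e.g.:
 *
 *   objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
 *
 *   objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
 *       free(ptr);
 *   }
 */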
12492
12493struct malloc_obj_info { /* 4 words */
12494 size_t size;
12495#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12496 size_t gen;
12497 const char *file;
12498 size_t line;
12499#endif
12500};
12501
12502#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12503const char *ruby_malloc_info_file;
12504int ruby_malloc_info_line;
12505#endif
12506
12507static inline size_t
12508objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12509{
12510 if (size == 0) size = 1;
12511
12512#if CALC_EXACT_MALLOC_SIZE
12513 size += sizeof(struct malloc_obj_info);
12514#endif
12515
12516 return size;
12517}
12518
12519static bool
12520malloc_during_gc_p(rb_objspace_t *objspace)
12521{
12522 /* malloc is not allowed during GC when we're not using multiple ractors
12523 * (since ractors can run while another thread is sweeping) and when we
12524 * have the GVL (since if we don't have the GVL, we'll try to acquire the
12525 * GVL which will block and ensure the other thread finishes GC). */
12526 return during_gc && !dont_gc_val() && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12527}
12528
12529static inline void *
12530objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12531{
12532 size = objspace_malloc_size(objspace, mem, size);
12533 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12534
12535#if CALC_EXACT_MALLOC_SIZE
12536 {
12537 struct malloc_obj_info *info = mem;
12538 info->size = size;
12539#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12540 info->gen = objspace->profile.count;
12541 info->file = ruby_malloc_info_file;
12542 info->line = info->file ? ruby_malloc_info_line : 0;
12543#endif
12544 mem = info + 1;
12545 }
12546#endif
12547
12548 return mem;
12549}
12550
12551#if defined(__GNUC__) && RUBY_DEBUG
12552#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12553#endif
12554
12555#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12556# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12557#endif
12558
12559#define GC_MEMERROR(...) \
12560 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12561
12562#define TRY_WITH_GC(siz, expr) do { \
12563 const gc_profile_record_flag gpr = \
12564 GPR_FLAG_FULL_MARK | \
12565 GPR_FLAG_IMMEDIATE_MARK | \
12566 GPR_FLAG_IMMEDIATE_SWEEP | \
12567 GPR_FLAG_MALLOC; \
12568 objspace_malloc_gc_stress(objspace); \
12569 \
12570 if (LIKELY((expr))) { \
12571 /* Success on 1st try */ \
12572 } \
12573 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12574 /* @shyouhei thinks this doesn't happen */ \
12575 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12576 } \
12577 else if ((expr)) { \
12578 /* Success on 2nd try */ \
12579 } \
12580 else { \
12581 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12582 "%"PRIdSIZE" bytes for %s", \
12583 siz, # expr); \
12584 } \
12585 } while (0)
12586
12587static void
12588check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
12589{
12590 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12591 dont_gc_on();
12592 during_gc = false;
12593 rb_bug("Cannot %s during GC", msg);
12594 }
12595}
12596
12597/* these shouldn't be called directly.
12598 * objspace_* functions do not check allocation size.
12599 */
12600static void *
12601objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
12602{
12603 check_malloc_not_in_gc(objspace, "malloc");
12604
12605 void *mem;
12606
12607 size = objspace_malloc_prepare(objspace, size);
12608 TRY_WITH_GC(size, mem = malloc(size));
12609 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12610 return objspace_malloc_fixup(objspace, mem, size);
12611}
12612
12613static inline size_t
12614xmalloc2_size(const size_t count, const size_t elsize)
12615{
12616 return size_mul_or_raise(count, elsize, rb_eArgError);
12617}
12618
12619static void *
12620objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12621{
12622 check_malloc_not_in_gc(objspace, "realloc");
12623
12624 void *mem;
12625
12626 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12627
12628 /*
12629 * The behavior of realloc(ptr, 0) is implementation defined.
12630     * Therefore we don't use realloc(ptr, 0) for portability reasons.
12631 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
12632 */
12633 if (new_size == 0) {
12634 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12635 /*
12636 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
12637 * returns a non-NULL pointer to an access-protected memory page.
12638 * The returned pointer cannot be read / written at all, but
12639             *   is still a valid argument of free().
12640 *
12641 * https://man.openbsd.org/malloc.3
12642 *
12643 * - Linux's malloc(3) man page says that it _might_ perhaps return
12644 * a non-NULL pointer when its argument is 0. That return value
12645 * is safe (and is expected) to be passed to free().
12646 *
12647 * https://man7.org/linux/man-pages/man3/malloc.3.html
12648 *
12649             * - As far as I can tell from the implementation, jemalloc's malloc()
12650             *   returns a fully normal 16-byte memory region when its argument is 0.
12651             *
12652             * - As far as I can tell from the implementation, musl libc's malloc()
12653             *   returns a fully normal 32-byte memory region when its argument is 0.
12654 *
12655 * - Other malloc implementations can also return non-NULL.
12656 */
12657 objspace_xfree(objspace, ptr, old_size);
12658 return mem;
12659 }
12660 else {
12661 /*
12662 * It is dangerous to return NULL here, because that could lead to
12663             * RCE. Fall back to 1 byte instead of zero.
12664 *
12665 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
12666 */
12667 new_size = 1;
12668 }
12669 }
12670
12671#if CALC_EXACT_MALLOC_SIZE
12672 {
12673 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12674 new_size += sizeof(struct malloc_obj_info);
12675 ptr = info;
12676 old_size = info->size;
12677 }
12678#endif
12679
12680 old_size = objspace_malloc_size(objspace, ptr, old_size);
12681 TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
12682 new_size = objspace_malloc_size(objspace, mem, new_size);
12683
12684#if CALC_EXACT_MALLOC_SIZE
12685 {
12686 struct malloc_obj_info *info = mem;
12687 info->size = new_size;
12688 mem = info + 1;
12689 }
12690#endif
12691
12692 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12693
12694 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12695 return mem;
12696}
12697
12698#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12699
12700#define MALLOC_INFO_GEN_SIZE 100
12701#define MALLOC_INFO_SIZE_SIZE 10
12702static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12703static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12704static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12705static st_table *malloc_info_file_table;
12706
12707static int
12708mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12709{
12710 const char *file = (void *)key;
12711 const size_t *data = (void *)val;
12712
12713 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12714
12715 return ST_CONTINUE;
12716}
12717
12718__attribute__((destructor))
12719void
12720rb_malloc_info_show_results(void)
12721{
12722 int i;
12723
12724 fprintf(stderr, "* malloc_info gen statistics\n");
12725 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12726 if (i == MALLOC_INFO_GEN_SIZE-1) {
12727 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12728 }
12729 else {
12730 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12731 }
12732 }
12733
12734 fprintf(stderr, "* malloc_info size statistics\n");
12735 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12736 int s = 16 << i;
12737 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12738 }
12739 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12740
12741 if (malloc_info_file_table) {
12742 fprintf(stderr, "* malloc_info file statistics\n");
12743 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12744 }
12745}
12746#else
12747void
12748rb_malloc_info_show_results(void)
12749{
12750}
12751#endif
12752
12753static void
12754objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12755{
12756 if (!ptr) {
12757 /*
12758 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
12759         * its first version. We had better follow it.
12760 */
12761 return;
12762 }
12763#if CALC_EXACT_MALLOC_SIZE
12764 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12765 ptr = info;
12766 old_size = info->size;
12767
12768#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12769 {
12770 int gen = (int)(objspace->profile.count - info->gen);
12771 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12772 int i;
12773
12774 malloc_info_gen_cnt[gen_index]++;
12775 malloc_info_gen_size[gen_index] += info->size;
12776
12777 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12778 size_t s = 16 << i;
12779 if (info->size <= s) {
12780 malloc_info_size[i]++;
12781 goto found;
12782 }
12783 }
12784 malloc_info_size[i]++;
12785 found:;
12786
12787 {
12788 st_data_t key = (st_data_t)info->file, d;
12789 size_t *data;
12790
12791 if (malloc_info_file_table == NULL) {
12792 malloc_info_file_table = st_init_numtable_with_size(1024);
12793 }
12794 if (st_lookup(malloc_info_file_table, key, &d)) {
12795 /* hit */
12796 data = (size_t *)d;
12797 }
12798 else {
12799 data = malloc(xmalloc2_size(2, sizeof(size_t)));
12800 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
12801 data[0] = data[1] = 0;
12802 st_insert(malloc_info_file_table, key, (st_data_t)data);
12803 }
12804 data[0] ++;
12805 data[1] += info->size;
12806 };
12807 if (0 && gen >= 2) { /* verbose output */
12808 if (info->file) {
12809 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
12810 info->size, gen, info->file, info->line);
12811 }
12812 else {
12813 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
12814 info->size, gen);
12815 }
12816 }
12817 }
12818#endif
12819#endif
12820 old_size = objspace_malloc_size(objspace, ptr, old_size);
12821
12822 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12823 free(ptr);
12824 ptr = NULL;
12825 RB_DEBUG_COUNTER_INC(heap_xfree);
12826 }
12827}
12828
12829static void *
12830ruby_xmalloc0(size_t size)
12831{
12832 return objspace_xmalloc0(&rb_objspace, size);
12833}
12834
12835void *
12836ruby_xmalloc_body(size_t size)
12837{
12838 if ((ssize_t)size < 0) {
12839 negative_size_allocation_error("too large allocation size");
12840 }
12841 return ruby_xmalloc0(size);
12842}
12843
12844void
12845ruby_malloc_size_overflow(size_t count, size_t elsize)
12846{
12847 rb_raise(rb_eArgError,
12848 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
12849 count, elsize);
12850}
12851
12852void *
12853ruby_xmalloc2_body(size_t n, size_t size)
12854{
12855 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
12856}
12857
12858static void *
12859objspace_xcalloc(rb_objspace_t *objspace, size_t size)
12860{
12861 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12862 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
12863#if RGENGC_CHECK_MODE || RUBY_DEBUG
12864 rb_bug("Cannot calloc during GC");
12865#endif
12866 }
12867
12868 void *mem;
12869
12870 size = objspace_malloc_prepare(objspace, size);
12871 TRY_WITH_GC(size, mem = calloc1(size));
12872 return objspace_malloc_fixup(objspace, mem, size);
12873}
12874
12875void *
12876ruby_xcalloc_body(size_t n, size_t size)
12877{
12878 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
12879}
12880
12881#ifdef ruby_sized_xrealloc
12882#undef ruby_sized_xrealloc
12883#endif
12884void *
12885ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
12886{
12887 if ((ssize_t)new_size < 0) {
12888 negative_size_allocation_error("too large allocation size");
12889 }
12890
12891 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
12892}
12893
12894void *
12895ruby_xrealloc_body(void *ptr, size_t new_size)
12896{
12897 return ruby_sized_xrealloc(ptr, new_size, 0);
12898}
12899
12900#ifdef ruby_sized_xrealloc2
12901#undef ruby_sized_xrealloc2
12902#endif
12903void *
12904ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
12905{
12906 size_t len = xmalloc2_size(n, size);
12907 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
12908}
12909
12910void *
12911ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
12912{
12913 return ruby_sized_xrealloc2(ptr, n, size, 0);
12914}
12915
12916#ifdef ruby_sized_xfree
12917#undef ruby_sized_xfree
12918#endif
12919void
12920ruby_sized_xfree(void *x, size_t size)
12921{
12922 if (LIKELY(x)) {
12923 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
12924 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
12925 * that case. */
12926 if (LIKELY(GET_VM())) {
12927 objspace_xfree(&rb_objspace, x, size);
12928 }
12929 else {
12930 ruby_mimfree(x);
12931 }
12932 }
12933}
12934
12935void
12936ruby_xfree(void *x)
12937{
12938 ruby_sized_xfree(x, 0);
12939}
12940
12941void *
12942rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12943{
12944 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12945 return ruby_xmalloc(w);
12946}
12947
12948void *
12949rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12950{
12951 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12952 return ruby_xcalloc(w, 1);
12953}
12954
12955void *
12956rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
12957{
12958 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12959 return ruby_xrealloc((void *)p, w);
12960}
12961
12962void *
12963rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12964{
12965 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12966 return ruby_xmalloc(u);
12967}
12968
12969void *
12970rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12971{
12972 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12973 return ruby_xcalloc(u, 1);
12974}
12975
12976/* Mimic ruby_xmalloc, but does not need rb_objspace.
12977 * Returns a pointer suitable for ruby_xfree.
12978 */
12979void *
12980ruby_mimmalloc(size_t size)
12981{
12982 void *mem;
12983#if CALC_EXACT_MALLOC_SIZE
12984 size += sizeof(struct malloc_obj_info);
12985#endif
12986 mem = malloc(size);
12987#if CALC_EXACT_MALLOC_SIZE
12988 if (!mem) {
12989 return NULL;
12990 }
12991 else
12992 /* set 0 for consistency of allocated_size/allocations */
12993 {
12994 struct malloc_obj_info *info = mem;
12995 info->size = 0;
12996#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12997 info->gen = 0;
12998 info->file = NULL;
12999 info->line = 0;
13000#endif
13001 mem = info + 1;
13002 }
13003#endif
13004 return mem;
13005}
13006
13007void
13008ruby_mimfree(void *ptr)
13009{
13010#if CALC_EXACT_MALLOC_SIZE
13011 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
13012 ptr = info;
13013#endif
13014 free(ptr);
13015}
13016
13017void *
13018rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
13019{
13020 void *ptr;
13021 VALUE imemo;
13022 rb_imemo_tmpbuf_t *tmpbuf;
13023
13024    /* Keep the order: allocate an empty imemo first, then xmalloc, to
13025     * avoid a potential memory leak. */
13026 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
13027 *store = imemo;
13028 ptr = ruby_xmalloc0(size);
13029 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
13030 tmpbuf->ptr = ptr;
13031 tmpbuf->cnt = cnt;
13032 return ptr;
13033}
13034
13035void *
13036rb_alloc_tmp_buffer(volatile VALUE *store, long len)
13037{
13038 long cnt;
13039
13040 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
13041 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
13042 }
13043
13044 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
13045}
13046
13047void
13048rb_free_tmp_buffer(volatile VALUE *store)
13049{
13050 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
13051 if (s) {
13052 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
13053 s->cnt = 0;
13054 ruby_xfree(ptr);
13055 }
13056}
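
/*
 * Illustrative sketch: extensions normally reach rb_alloc_tmp_buffer_with_count()
 * and rb_free_tmp_buffer() through the public ALLOCV_N()/ALLOCV_END() macros;
 * sufficiently small requests may be served from the machine stack instead.
 * The function name is made up for the example.
 */
static VALUE
example_sum_longs(VALUE ary)
{
    long i, n = RARRAY_LEN(ary), sum = 0;
    VALUE store = 0;
    long *buf = ALLOCV_N(long, store, n); /* GC-safe temporary buffer */

    for (i = 0; i < n; i++) buf[i] = NUM2LONG(RARRAY_AREF(ary, i));
    for (i = 0; i < n; i++) sum += buf[i];

    ALLOCV_END(store); /* releases via rb_free_tmp_buffer() when heap-allocated */
    return LONG2NUM(sum);
}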
13057
13058#if MALLOC_ALLOCATED_SIZE
13059/*
13060 * call-seq:
13061 * GC.malloc_allocated_size -> Integer
13062 *
13063 * Returns the size of memory allocated by malloc().
13064 *
13065 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
13066 */
13067
13068static VALUE
13069gc_malloc_allocated_size(VALUE self)
13070{
13071 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
13072}
13073
13074/*
13075 * call-seq:
13076 * GC.malloc_allocations -> Integer
13077 *
13078 * Returns the number of malloc() allocations.
13079 *
13080 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
13081 */
13082
13083static VALUE
13084gc_malloc_allocations(VALUE self)
13085{
13086 return UINT2NUM(rb_objspace.malloc_params.allocations);
13087}
13088#endif
13089
13090void
13091rb_gc_adjust_memory_usage(ssize_t diff)
13092{
13093 unless_objspace(objspace) { return; }
13094
13095 if (diff > 0) {
13096 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
13097 }
13098 else if (diff < 0) {
13099 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
13100 }
13101}
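
/*
 * Illustrative sketch: rb_gc_adjust_memory_usage() lets an extension report
 * memory obtained outside the ruby_xmalloc family, so that malloc_increase
 * driven GC triggering stays roughly in step with real usage. The struct and
 * helper names are made up for the example.
 */
struct example_blob { void *bytes; size_t len; };

static int
example_blob_attach(struct example_blob *b, size_t len)
{
    b->bytes = malloc(len);
    if (b->bytes == NULL) return -1;
    b->len = len;
    rb_gc_adjust_memory_usage((ssize_t)len);     /* count the external growth */
    return 0;
}

static void
example_blob_detach(struct example_blob *b)
{
    free(b->bytes);
    rb_gc_adjust_memory_usage(-(ssize_t)b->len); /* and the matching decrease */
    b->bytes = NULL;
    b->len = 0;
}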
13102
13103/*
13104 ------------------------------ GC profiler ------------------------------
13105*/
13106
13107#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
13108
13109static bool
13110current_process_time(struct timespec *ts)
13111{
13112#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
13113 {
13114 static int try_clock_gettime = 1;
13115 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
13116 return true;
13117 }
13118 else {
13119 try_clock_gettime = 0;
13120 }
13121 }
13122#endif
13123
13124#ifdef RUSAGE_SELF
13125 {
13126 struct rusage usage;
13127 struct timeval time;
13128 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13129 time = usage.ru_utime;
13130 ts->tv_sec = time.tv_sec;
13131 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
13132 return true;
13133 }
13134 }
13135#endif
13136
13137#ifdef _WIN32
13138 {
13139 FILETIME creation_time, exit_time, kernel_time, user_time;
13140 ULARGE_INTEGER ui;
13141
13142 if (GetProcessTimes(GetCurrentProcess(),
13143 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
13144 memcpy(&ui, &user_time, sizeof(FILETIME));
13145#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
13146 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
13147 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
13148 return true;
13149 }
13150 }
13151#endif
13152
13153 return false;
13154}
13155
13156static double
13157getrusage_time(void)
13158{
13159 struct timespec ts;
13160 if (current_process_time(&ts)) {
13161 return ts.tv_sec + ts.tv_nsec * 1e-9;
13162 }
13163 else {
13164 return 0.0;
13165 }
13166}
13167
13168
13169static inline void
13170gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
13171{
13172 if (objspace->profile.run) {
13173 size_t index = objspace->profile.next_index;
13174 gc_profile_record *record;
13175
13176 /* create new record */
13177 objspace->profile.next_index++;
13178
13179 if (!objspace->profile.records) {
13180 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
13181 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13182 }
13183 if (index >= objspace->profile.size) {
13184 void *ptr;
13185 objspace->profile.size += 1000;
13186 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13187 if (!ptr) rb_memerror();
13188 objspace->profile.records = ptr;
13189 }
13190 if (!objspace->profile.records) {
13191 rb_bug("gc_profile malloc or realloc miss");
13192 }
13193 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
13194 MEMZERO(record, gc_profile_record, 1);
13195
13196 /* setup before-GC parameter */
13197 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
13198#if MALLOC_ALLOCATED_SIZE
13199 record->allocated_size = malloc_allocated_size;
13200#endif
13201#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
13202#ifdef RUSAGE_SELF
13203 {
13204 struct rusage usage;
13205 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13206 record->maxrss = usage.ru_maxrss;
13207 record->minflt = usage.ru_minflt;
13208 record->majflt = usage.ru_majflt;
13209 }
13210 }
13211#endif
13212#endif
13213 }
13214}
13215
13216static inline void
13217gc_prof_timer_start(rb_objspace_t *objspace)
13218{
13219 if (gc_prof_enabled(objspace)) {
13220 gc_profile_record *record = gc_prof_record(objspace);
13221#if GC_PROFILE_MORE_DETAIL
13222 record->prepare_time = objspace->profile.prepare_time;
13223#endif
13224 record->gc_time = 0;
13225 record->gc_invoke_time = getrusage_time();
13226 }
13227}
13228
13229static double
13230elapsed_time_from(double time)
13231{
13232 double now = getrusage_time();
13233 if (now > time) {
13234 return now - time;
13235 }
13236 else {
13237 return 0;
13238 }
13239}
13240
13241static inline void
13242gc_prof_timer_stop(rb_objspace_t *objspace)
13243{
13244 if (gc_prof_enabled(objspace)) {
13245 gc_profile_record *record = gc_prof_record(objspace);
13246 record->gc_time = elapsed_time_from(record->gc_invoke_time);
13247 record->gc_invoke_time -= objspace->profile.invoke_time;
13248 }
13249}
13250
13251#define RUBY_DTRACE_GC_HOOK(name) \
13252 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
13253static inline void
13254gc_prof_mark_timer_start(rb_objspace_t *objspace)
13255{
13256 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
13257#if GC_PROFILE_MORE_DETAIL
13258 if (gc_prof_enabled(objspace)) {
13259 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
13260 }
13261#endif
13262}
13263
13264static inline void
13265gc_prof_mark_timer_stop(rb_objspace_t *objspace)
13266{
13267 RUBY_DTRACE_GC_HOOK(MARK_END);
13268#if GC_PROFILE_MORE_DETAIL
13269 if (gc_prof_enabled(objspace)) {
13270 gc_profile_record *record = gc_prof_record(objspace);
13271 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
13272 }
13273#endif
13274}
13275
13276static inline void
13277gc_prof_sweep_timer_start(rb_objspace_t *objspace)
13278{
13279 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
13280 if (gc_prof_enabled(objspace)) {
13281 gc_profile_record *record = gc_prof_record(objspace);
13282
13283 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
13284 objspace->profile.gc_sweep_start_time = getrusage_time();
13285 }
13286 }
13287}
13288
13289static inline void
13290gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
13291{
13292 RUBY_DTRACE_GC_HOOK(SWEEP_END);
13293
13294 if (gc_prof_enabled(objspace)) {
13295 double sweep_time;
13296 gc_profile_record *record = gc_prof_record(objspace);
13297
13298 if (record->gc_time > 0) {
13299 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13300 /* need to accumulate GC time for lazy sweep after gc() */
13301 record->gc_time += sweep_time;
13302 }
13303 else if (GC_PROFILE_MORE_DETAIL) {
13304 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13305 }
13306
13307#if GC_PROFILE_MORE_DETAIL
13308 record->gc_sweep_time += sweep_time;
13309 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
13310#endif
13311 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
13312 }
13313}
13314
13315static inline void
13316gc_prof_set_malloc_info(rb_objspace_t *objspace)
13317{
13318#if GC_PROFILE_MORE_DETAIL
13319 if (gc_prof_enabled(objspace)) {
13320 gc_profile_record *record = gc_prof_record(objspace);
13321 record->allocate_increase = malloc_increase;
13322 record->allocate_limit = malloc_limit;
13323 }
13324#endif
13325}
13326
13327static inline void
13328gc_prof_set_heap_info(rb_objspace_t *objspace)
13329{
13330 if (gc_prof_enabled(objspace)) {
13331 gc_profile_record *record = gc_prof_record(objspace);
13332 size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
13333 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
13334
13335#if GC_PROFILE_MORE_DETAIL
13336 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
13337 record->heap_live_objects = live;
13338 record->heap_free_objects = total - live;
13339#endif
13340
13341 record->heap_total_objects = total;
13342 record->heap_use_size = live * sizeof(RVALUE);
13343 record->heap_total_size = total * sizeof(RVALUE);
13344 }
13345}
13346
13347/*
13348 * call-seq:
13349 * GC::Profiler.clear -> nil
13350 *
13351 * Clears the \GC profiler data.
13352 *
13353 */
13354
13355static VALUE
13356gc_profile_clear(VALUE _)
13357{
13358 rb_objspace_t *objspace = &rb_objspace;
13359 void *p = objspace->profile.records;
13360 objspace->profile.records = NULL;
13361 objspace->profile.size = 0;
13362 objspace->profile.next_index = 0;
13363 objspace->profile.current_record = 0;
13364 free(p);
13365 return Qnil;
13366}
13367
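/*
 * A minimal Ruby-level sketch of resetting the collected data; the sizes
 * shown are illustrative only.
 *
 *    GC::Profiler.enable
 *    GC.start
 *    GC::Profiler.raw_data.size  # => 1 (or more)
 *    GC::Profiler.clear
 *    GC::Profiler.raw_data       # => []
 */
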
13368/*
13369 * call-seq:
13370 * GC::Profiler.raw_data -> [Hash, ...]
13371 *
13372 * Returns an Array of individual raw profile data Hashes ordered
13373 * from earliest to latest by +:GC_INVOKE_TIME+.
13374 *
13375 * For example:
13376 *
13377 * [
13378 * {
13379 * :GC_TIME=>1.3000000000000858e-05,
13380 * :GC_INVOKE_TIME=>0.010634999999999999,
13381 * :HEAP_USE_SIZE=>289640,
13382 * :HEAP_TOTAL_SIZE=>588960,
13383 * :HEAP_TOTAL_OBJECTS=>14724,
13384 * :GC_IS_MARKED=>false
13385 * },
13386 * # ...
13387 * ]
13388 *
13389 * The keys mean:
13390 *
13391 * +:GC_TIME+::
13392 * Time elapsed in seconds for this GC run
13393 * +:GC_INVOKE_TIME+::
13394 * Time elapsed in seconds from startup to when the GC was invoked
13395 * +:HEAP_USE_SIZE+::
13396 * Total bytes of heap used
13397 * +:HEAP_TOTAL_SIZE+::
13398 * Total size of heap in bytes
13399 * +:HEAP_TOTAL_OBJECTS+::
13400 * Total number of objects
13401 * +:GC_IS_MARKED+::
13402 * Returns +true+ if the GC is in mark phase
13403 *
13404 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
13405 * to the following hash keys:
13406 *
13407 * +:GC_MARK_TIME+::
13408 * +:GC_SWEEP_TIME+::
13409 * +:ALLOCATE_INCREASE+::
13410 * +:ALLOCATE_LIMIT+::
13411 * +:HEAP_USE_PAGES+::
13412 * +:HEAP_LIVE_OBJECTS+::
13413 * +:HEAP_FREE_OBJECTS+::
13414 * +:HAVE_FINALIZE+::
13415 *
13416 */
13417
13418static VALUE
13419gc_profile_record_get(VALUE _)
13420{
13421 VALUE prof;
13422 VALUE gc_profile = rb_ary_new();
13423 size_t i;
13424 rb_objspace_t *objspace = (&rb_objspace);
13425
13426 if (!objspace->profile.run) {
13427 return Qnil;
13428 }
13429
13430 for (i =0; i < objspace->profile.next_index; i++) {
13431 gc_profile_record *record = &objspace->profile.records[i];
13432
13433 prof = rb_hash_new();
13434 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
13435 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
13436 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
13437 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
13438 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
13439 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
13440 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
13441 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
13442#if GC_PROFILE_MORE_DETAIL
13443 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
13444 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
13445 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
13446 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
13447 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
13448 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
13449 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
13450
13451 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
13452 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
13453
13454 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
13455#endif
13456
13457#if RGENGC_PROFILE > 0
13458 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
13459 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
13460 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
13461#endif
13462 rb_ary_push(gc_profile, prof);
13463 }
13464
13465 return gc_profile;
13466}
13467
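/*
 * A small Ruby-level sketch of consuming the raw records returned above,
 * e.g. to find the slowest collection; the values are illustrative.
 *
 *    GC::Profiler.enable
 *    3.times { GC.start }
 *    slowest = GC::Profiler.raw_data.max_by { |rec| rec[:GC_TIME] }
 *    slowest[:HEAP_USE_SIZE]  # => 289640 (for example)
 */
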
13468#if GC_PROFILE_MORE_DETAIL
13469#define MAJOR_REASON_MAX 0x10
13470
13471static char *
13472gc_profile_dump_major_reason(unsigned int flags, char *buff)
13473{
13474 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
13475 int i = 0;
13476
13477 if (reason == GPR_FLAG_NONE) {
13478 buff[0] = '-';
13479 buff[1] = 0;
13480 }
13481 else {
13482#define C(x, s) \
13483 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
13484 buff[i++] = #x[0]; \
13485 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
13486 buff[i] = 0; \
13487 }
13488 C(NOFREE, N);
13489 C(OLDGEN, O);
13490 C(SHADY, S);
13491#if RGENGC_ESTIMATE_OLDMALLOC
13492 C(OLDMALLOC, M);
13493#endif
13494#undef C
13495 }
13496 return buff;
13497}
13498#endif
13499
13500static void
13501gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
13502{
13503 rb_objspace_t *objspace = &rb_objspace;
13504 size_t count = objspace->profile.next_index;
13505#ifdef MAJOR_REASON_MAX
13506 char reason_str[MAJOR_REASON_MAX];
13507#endif
13508
13509 if (objspace->profile.run && count /* > 1 */) {
13510 size_t i;
13511 const gc_profile_record *record;
13512
13513 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
13514 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
13515
13516 for (i = 0; i < count; i++) {
13517 record = &objspace->profile.records[i];
13518 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
13519 i+1, record->gc_invoke_time, record->heap_use_size,
13520 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
13521 }
13522
13523#if GC_PROFILE_MORE_DETAIL
13524 const char *str = "\n\n" \
13525 "More detail.\n" \
13526 "Prepare Time = Previously GC's rest sweep time\n"
13527 "Index Flags Allocate Inc. Allocate Limit"
13528#if CALC_EXACT_MALLOC_SIZE
13529 " Allocated Size"
13530#endif
13531 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
13532#if RGENGC_PROFILE
13533 " OldgenObj RemNormObj RemShadObj"
13534#endif
13535#if GC_PROFILE_DETAIL_MEMORY
13536 " MaxRSS(KB) MinorFLT MajorFLT"
13537#endif
13538 "\n";
13539 append(out, rb_str_new_cstr(str));
13540
13541 for (i = 0; i < count; i++) {
13542 record = &objspace->profile.records[i];
13543 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
13544#if CALC_EXACT_MALLOC_SIZE
13545 " %15"PRIuSIZE
13546#endif
13547 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13548#if RGENGC_PROFILE
13549 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13550#endif
13551#if GC_PROFILE_DETAIL_MEMORY
13552 "%11ld %8ld %8ld"
13553#endif
13554
13555 "\n",
13556 i+1,
13557 gc_profile_dump_major_reason(record->flags, reason_str),
13558 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
13559 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
13560 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
13561 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
13562 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
13563 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
13564 record->allocate_increase, record->allocate_limit,
13565#if CALC_EXACT_MALLOC_SIZE
13566 record->allocated_size,
13567#endif
13568 record->heap_use_pages,
13569 record->gc_mark_time*1000,
13570 record->gc_sweep_time*1000,
13571 record->prepare_time*1000,
13572
13573 record->heap_live_objects,
13574 record->heap_free_objects,
13575 record->removing_objects,
13576 record->empty_objects
13577#if RGENGC_PROFILE
13578 ,
13579 record->old_objects,
13580 record->remembered_normal_objects,
13581 record->remembered_shady_objects
13582#endif
13583#if GC_PROFILE_DETAIL_MEMORY
13584 ,
13585 record->maxrss / 1024,
13586 record->minflt,
13587 record->majflt
13588#endif
13589
13590 ));
13591 }
13592#endif
13593 }
13594}
13595
13596/*
13597 * call-seq:
13598 * GC::Profiler.result -> String
13599 *
13600 * Returns a profile data report such as:
13601 *
13602 * GC 1 invokes.
13603 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
13604 * 1 0.012 159240 212940 10647 0.00000000000001530000
13605 */
13606
13607static VALUE
13608gc_profile_result(VALUE _)
13609{
13610 VALUE str = rb_str_buf_new(0);
13611 gc_profile_dump_on(str, rb_str_buf_append);
13612 return str;
13613}
13614
13615/*
13616 * call-seq:
13617 * GC::Profiler.report
13618 * GC::Profiler.report(io)
13619 *
13620 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
13621 *
13622 */
13623
13624static VALUE
13625gc_profile_report(int argc, VALUE *argv, VALUE self)
13626{
13627 VALUE out;
13628
13629 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
13630 gc_profile_dump_on(out, rb_io_write);
13631
13632 return Qnil;
13633}
13634
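/*
 * A Ruby-level sketch of sending the report somewhere other than $stdout;
 * here a File opened for writing stands in for the IO argument.
 *
 *    GC::Profiler.enable
 *    GC.start
 *    File.open("gc_profile.txt", "w") { |f| GC::Profiler.report(f) }
 */
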
13635/*
13636 * call-seq:
13637 * GC::Profiler.total_time -> float
13638 *
13639 * The total time used for garbage collection in seconds
13640 */
13641
13642static VALUE
13643gc_profile_total_time(VALUE self)
13644{
13645 double time = 0;
13646 rb_objspace_t *objspace = &rb_objspace;
13647
13648 if (objspace->profile.run && objspace->profile.next_index > 0) {
13649 size_t i;
13650 size_t count = objspace->profile.next_index;
13651
13652 for (i = 0; i < count; i++) {
13653 time += objspace->profile.records[i].gc_time;
13654 }
13655 }
13656 return DBL2NUM(time);
13657}
13658
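/*
 * A Ruby-level sketch of measuring cumulative GC time across a workload;
 * the figure shown is illustrative.
 *
 *    GC::Profiler.enable
 *    100_000.times { "x" * 100 }
 *    GC.start
 *    GC::Profiler.total_time  # => 0.002 (seconds, for example)
 */
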
13659/*
13660 * call-seq:
13661 * GC::Profiler.enabled? -> true or false
13662 *
13663 * The current status of \GC profile mode.
13664 */
13665
13666static VALUE
13667gc_profile_enable_get(VALUE self)
13668{
13669 rb_objspace_t *objspace = &rb_objspace;
13670 return RBOOL(objspace->profile.run);
13671}
13672
13673/*
13674 * call-seq:
13675 * GC::Profiler.enable -> nil
13676 *
13677 * Starts the \GC profiler.
13678 *
13679 */
13680
13681static VALUE
13682gc_profile_enable(VALUE _)
13683{
13684 rb_objspace_t *objspace = &rb_objspace;
13685 objspace->profile.run = TRUE;
13686 objspace->profile.current_record = 0;
13687 return Qnil;
13688}
13689
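/*
 * A Ruby-level sketch; enabling is harmless to repeat and can be checked
 * at any point:
 *
 *    GC::Profiler.enabled?  # => false
 *    GC::Profiler.enable
 *    GC::Profiler.enabled?  # => true
 */
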
13690/*
13691 * call-seq:
13692 * GC::Profiler.disable -> nil
13693 *
13694 * Stops the \GC profiler.
13695 *
13696 */
13697
13698static VALUE
13699gc_profile_disable(VALUE _)
13700{
13701 rb_objspace_t *objspace = &rb_objspace;
13702
13703 objspace->profile.run = FALSE;
13704 objspace->profile.current_record = 0;
13705 return Qnil;
13706}
13707
13708/*
13709 ------------------------------ DEBUG ------------------------------
13710*/
13711
13712static const char *
13713type_name(int type, VALUE obj)
13714{
13715 switch (type) {
13716#define TYPE_NAME(t) case (t): return #t;
13717 TYPE_NAME(T_NONE);
13718 TYPE_NAME(T_OBJECT);
13719 TYPE_NAME(T_CLASS);
13720 TYPE_NAME(T_MODULE);
13721 TYPE_NAME(T_FLOAT);
13722 TYPE_NAME(T_STRING);
13723 TYPE_NAME(T_REGEXP);
13724 TYPE_NAME(T_ARRAY);
13725 TYPE_NAME(T_HASH);
13726 TYPE_NAME(T_STRUCT);
13727 TYPE_NAME(T_BIGNUM);
13728 TYPE_NAME(T_FILE);
13729 TYPE_NAME(T_MATCH);
13730 TYPE_NAME(T_COMPLEX);
13731 TYPE_NAME(T_RATIONAL);
13732 TYPE_NAME(T_NIL);
13733 TYPE_NAME(T_TRUE);
13734 TYPE_NAME(T_FALSE);
13735 TYPE_NAME(T_SYMBOL);
13736 TYPE_NAME(T_FIXNUM);
13737 TYPE_NAME(T_UNDEF);
13738 TYPE_NAME(T_IMEMO);
13739 TYPE_NAME(T_ICLASS);
13740 TYPE_NAME(T_MOVED);
13741 TYPE_NAME(T_ZOMBIE);
13742 case T_DATA:
13743 if (obj && rb_objspace_data_type_name(obj)) {
13744 return rb_objspace_data_type_name(obj);
13745 }
13746 return "T_DATA";
13747#undef TYPE_NAME
13748 }
13749 return "unknown";
13750}
13751
13752static const char *
13753obj_type_name(VALUE obj)
13754{
13755 return type_name(TYPE(obj), obj);
13756}
13757
13758const char *
13759rb_method_type_name(rb_method_type_t type)
13760{
13761 switch (type) {
13762 case VM_METHOD_TYPE_ISEQ: return "iseq";
13763      case VM_METHOD_TYPE_ATTRSET: return "attrset";
13764 case VM_METHOD_TYPE_IVAR: return "ivar";
13765 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13766 case VM_METHOD_TYPE_ALIAS: return "alias";
13767 case VM_METHOD_TYPE_REFINED: return "refined";
13768 case VM_METHOD_TYPE_CFUNC: return "cfunc";
13769 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13770 case VM_METHOD_TYPE_MISSING: return "missing";
13771 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13772 case VM_METHOD_TYPE_UNDEF: return "undef";
13773 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13774 }
13775 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13776}
13777
13778static void
13779rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
13780{
13781 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
13782 VALUE path = rb_iseq_path(iseq);
13783 int n = ISEQ_BODY(iseq)->location.first_lineno;
13784 snprintf(buff, buff_size, " %s@%s:%d",
13785 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
13786 RSTRING_PTR(path), n);
13787 }
13788}
13789
13790static int
13791str_len_no_raise(VALUE str)
13792{
13793 long len = RSTRING_LEN(str);
13794 if (len < 0) return 0;
13795 if (len > INT_MAX) return INT_MAX;
13796 return (int)len;
13797}
13798
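/* Helpers for rb_raw_obj_info_*(): append formatted text to `buff` at `pos`,
 * bailing out to the `end` label once the buffer would overflow, so the
 * description is silently truncated rather than overrun. */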
13799#define BUFF_ARGS buff + pos, buff_size - pos
13800#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
13801#define APPEND_S(s) do { \
13802 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
13803 goto end; \
13804 } \
13805 else { \
13806 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
13807 } \
13808 } while (0)
13809#define TF(c) ((c) != 0 ? "true" : "false")
13810#define C(c, s) ((c) != 0 ? (s) : " ")
13811
13812static size_t
13813rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
13814{
13815 size_t pos = 0;
13816
13817 if (SPECIAL_CONST_P(obj)) {
13818 APPEND_F("%s", obj_type_name(obj));
13819
13820 if (FIXNUM_P(obj)) {
13821 APPEND_F(" %ld", FIX2LONG(obj));
13822 }
13823 else if (SYMBOL_P(obj)) {
13824 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
13825 }
13826 }
13827 else {
13828 const int age = RVALUE_AGE_GET(obj);
13829
13830 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
13831 APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
13832 (void *)obj, age,
13833 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
13834 C(RVALUE_MARK_BITMAP(obj), "M"),
13835 C(RVALUE_PIN_BITMAP(obj), "P"),
13836 C(RVALUE_MARKING_BITMAP(obj), "R"),
13837 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
13838 C(rb_objspace_garbage_object_p(obj), "G"),
13839 obj_type_name(obj));
13840 }
13841 else {
13842 /* fake */
13843 APPEND_F("%p [%dXXXX] %s",
13844 (void *)obj, age,
13845 obj_type_name(obj));
13846 }
13847
13848 if (internal_object_p(obj)) {
13849 /* ignore */
13850 }
13851 else if (RBASIC(obj)->klass == 0) {
13852 APPEND_S("(temporary internal)");
13853 }
13854 else if (RTEST(RBASIC(obj)->klass)) {
13855 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
13856 if (!NIL_P(class_path)) {
13857 APPEND_F("(%s)", RSTRING_PTR(class_path));
13858 }
13859 }
13860
13861#if GC_DEBUG
13862 APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
13863#endif
13864 }
13865 end:
13866
13867 return pos;
13868}
13869
13870static size_t
13871rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
13872{
13873 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
13874 const enum ruby_value_type type = BUILTIN_TYPE(obj);
13875
13876 switch (type) {
13877 case T_NODE:
13878 UNEXPECTED_NODE(rb_raw_obj_info);
13879 break;
13880 case T_ARRAY:
13881 if (ARY_SHARED_P(obj)) {
13882 APPEND_S("shared -> ");
13883 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
13884 }
13885 else if (ARY_EMBED_P(obj)) {
13886 APPEND_F("[%s%s] len: %ld (embed)",
13887 C(ARY_EMBED_P(obj), "E"),
13888 C(ARY_SHARED_P(obj), "S"),
13889 RARRAY_LEN(obj));
13890 }
13891 else {
13892 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
13893 C(ARY_EMBED_P(obj), "E"),
13894 C(ARY_SHARED_P(obj), "S"),
13895 RARRAY_LEN(obj),
13896 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
13897 (void *)RARRAY_CONST_PTR(obj));
13898 }
13899 break;
13900 case T_STRING: {
13901 if (STR_SHARED_P(obj)) {
13902 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
13903 }
13904 else {
13905 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
13906
13907 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
13908 }
13909 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
13910 break;
13911 }
13912 case T_SYMBOL: {
13913 VALUE fstr = RSYMBOL(obj)->fstr;
13914 ID id = RSYMBOL(obj)->id;
13915 if (RB_TYPE_P(fstr, T_STRING)) {
13916 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
13917 }
13918 else {
13919 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
13920 }
13921 break;
13922 }
13923 case T_MOVED: {
13924 APPEND_F("-> %p", (void*)rb_gc_location(obj));
13925 break;
13926 }
13927 case T_HASH: {
13928 APPEND_F("[%c] %"PRIdSIZE,
13929 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
13930 RHASH_SIZE(obj));
13931 break;
13932 }
13933 case T_CLASS:
13934 case T_MODULE:
13935 {
13936 VALUE class_path = rb_class_path_cached(obj);
13937 if (!NIL_P(class_path)) {
13938 APPEND_F("%s", RSTRING_PTR(class_path));
13939 }
13940 else {
13941 APPEND_S("(anon)");
13942 }
13943 break;
13944 }
13945 case T_ICLASS:
13946 {
13947 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
13948 if (!NIL_P(class_path)) {
13949 APPEND_F("src:%s", RSTRING_PTR(class_path));
13950 }
13951 break;
13952 }
13953 case T_OBJECT:
13954 {
13955 if (rb_shape_obj_too_complex(obj)) {
13956 size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
13957 APPEND_F("(too_complex) len:%zu", hash_len);
13958 }
13959 else {
13960 uint32_t len = ROBJECT_IV_CAPACITY(obj);
13961
13962 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
13963 APPEND_F("(embed) len:%d", len);
13964 }
13965 else {
13966 VALUE *ptr = ROBJECT_IVPTR(obj);
13967 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
13968 }
13969 }
13970 }
13971 break;
13972 case T_DATA: {
13973 const struct rb_block *block;
13974 const rb_iseq_t *iseq;
13975 if (rb_obj_is_proc(obj) &&
13976 (block = vm_proc_block(obj)) != NULL &&
13977 (vm_block_type(block) == block_type_iseq) &&
13978 (iseq = vm_block_iseq(block)) != NULL) {
13979 rb_raw_iseq_info(BUFF_ARGS, iseq);
13980 }
13981 else if (rb_ractor_p(obj)) {
13982 rb_ractor_t *r = (void *)DATA_PTR(obj);
13983 if (r) {
13984 APPEND_F("r:%d", r->pub.id);
13985 }
13986 }
13987 else {
13988 const char * const type_name = rb_objspace_data_type_name(obj);
13989 if (type_name) {
13990 APPEND_F("%s", type_name);
13991 }
13992 }
13993 break;
13994 }
13995 case T_IMEMO: {
13996 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
13997
13998 switch (imemo_type(obj)) {
13999 case imemo_ment:
14000 {
14001 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
14002
14003 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
14004 rb_id2name(me->called_id),
14005 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
14006 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
14007 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
14008 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
14009 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
14010 me->def ? rb_method_type_name(me->def->type) : "NULL",
14011 me->def ? me->def->aliased : -1,
14012 (void *)me->owner, // obj_info(me->owner),
14013 (void *)me->defined_class); //obj_info(me->defined_class)));
14014
14015 if (me->def) {
14016 switch (me->def->type) {
14017 case VM_METHOD_TYPE_ISEQ:
14018 APPEND_S(" (iseq:");
14019 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
14020 APPEND_S(")");
14021 break;
14022 default:
14023 break;
14024 }
14025 }
14026
14027 break;
14028 }
14029 case imemo_iseq: {
14030 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
14031 rb_raw_iseq_info(BUFF_ARGS, iseq);
14032 break;
14033 }
14034 case imemo_callinfo:
14035 {
14036 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
14037 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
14038 rb_id2name(vm_ci_mid(ci)),
14039 vm_ci_flag(ci),
14040 vm_ci_argc(ci),
14041 vm_ci_kwarg(ci) ? "available" : "NULL");
14042 break;
14043 }
14044 case imemo_callcache:
14045 {
14046 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
14047 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
14048 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
14049
14050 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
14051 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
14052 cme ? rb_id2name(cme->called_id) : "<NULL>",
14053 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
14054 (void *)cme,
14055 (void *)vm_cc_call(cc));
14056 break;
14057 }
14058 default:
14059 break;
14060 }
14061 }
14062 default:
14063 break;
14064 }
14065 }
14066 end:
14067
14068 return pos;
14069}
14070
14071#undef TF
14072#undef C
14073
14074const char *
14075rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
14076{
14077 asan_unpoisoning_object(obj) {
14078 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
14079 pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
14080 if (pos >= buff_size) {} // truncated
14081 }
14082
14083 return buff;
14084}
14085
14086#undef APPEND_S
14087#undef APPEND_F
14088#undef BUFF_ARGS
14089
14090#if RGENGC_OBJ_INFO
14091#define OBJ_INFO_BUFFERS_NUM 10
14092#define OBJ_INFO_BUFFERS_SIZE 0x100
14093static rb_atomic_t obj_info_buffers_index = 0;
14094static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
14095
14096/* Increments *var atomically and resets *var to 0 when maxval is
14097 * reached. Returns the wraparound old *var value (0...maxval). */
14098static rb_atomic_t
14099atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
14100{
14101 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
14102 if (UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
14103 const rb_atomic_t newval = oldval + 1;
14104 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
14105 oldval %= maxval;
14106 }
14107 return oldval;
14108}
14109
14110static const char *
14111obj_info(VALUE obj)
14112{
14113 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
14114 char *const buff = obj_info_buffers[index];
14115 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
14116}
14117#else
14118static const char *
14119obj_info(VALUE obj)
14120{
14121 return obj_type_name(obj);
14122}
14123#endif
14124
14125const char *
14126rb_obj_info(VALUE obj)
14127{
14128 return obj_info(obj);
14129}
14130
14131void
14132rb_obj_info_dump(VALUE obj)
14133{
14134 char buff[0x100];
14135 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
14136}
14137
14138void
14139rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
14140{
14141 char buff[0x100];
14142 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
14143}
14144
14145#if GC_DEBUG
14146
14147void
14148rb_gcdebug_print_obj_condition(VALUE obj)
14149{
14150 rb_objspace_t *objspace = &rb_objspace;
14151
14152 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
14153
14154 if (BUILTIN_TYPE(obj) == T_MOVED) {
14155 fprintf(stderr, "moved?: true\n");
14156 }
14157 else {
14158 fprintf(stderr, "moved?: false\n");
14159 }
14160 if (is_pointer_to_heap(objspace, (void *)obj)) {
14161 fprintf(stderr, "pointer to heap?: true\n");
14162 }
14163 else {
14164 fprintf(stderr, "pointer to heap?: false\n");
14165 return;
14166 }
14167
14168 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
14169 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
14170 fprintf(stderr, "age? : %d\n", RVALUE_AGE_GET(obj));
14171 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
14172 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
14173 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
14174
14175 if (is_lazy_sweeping(objspace)) {
14176 fprintf(stderr, "lazy sweeping?: true\n");
14177 fprintf(stderr, "swept?: %s\n", is_swept_object(obj) ? "done" : "not yet");
14178 }
14179 else {
14180 fprintf(stderr, "lazy sweeping?: false\n");
14181 }
14182}
14183
14184static VALUE
14185gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
14186{
14187 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
14188 return Qnil;
14189}
14190
14191void
14192rb_gcdebug_sentinel(VALUE obj, const char *name)
14193{
14194 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
14195}
14196
14197#endif /* GC_DEBUG */
14198
14199/*
14200 * call-seq:
14201 * GC.add_stress_to_class(class[, ...])
14202 *
14203 * Raises NoMemoryError when allocating an instance of the given classes.
14204 *
14205 */
14206static VALUE
14207rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
14208{
14209 rb_objspace_t *objspace = &rb_objspace;
14210
14211 if (!stress_to_class) {
14212 set_stress_to_class(rb_ary_hidden_new(argc));
14213 }
14214 rb_ary_cat(stress_to_class, argv, argc);
14215 return self;
14216}
14217
14218/*
14219 * call-seq:
14220 * GC.remove_stress_to_class(class[, ...])
14221 *
14222 * No longer raises NoMemoryError when allocating an instance of the
14223 * given classes.
14224 *
14225 */
14226static VALUE
14227rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
14228{
14229 rb_objspace_t *objspace = &rb_objspace;
14230 int i;
14231
14232 if (stress_to_class) {
14233 for (i = 0; i < argc; ++i) {
14234 rb_ary_delete_same(stress_to_class, argv[i]);
14235 }
14236 if (RARRAY_LEN(stress_to_class) == 0) {
14237 set_stress_to_class(0);
14238 }
14239 }
14240 return Qnil;
14241}
14242
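/*
 * A Ruby-level sketch of the stress-to-class debug helpers above; they are
 * only defined when the interpreter is built with GC_DEBUG_STRESS_TO_CLASS.
 *
 *    GC.add_stress_to_class(String)
 *    String.new("boom")                # raises NoMemoryError
 *    GC.remove_stress_to_class(String)
 *    String.new("fine")                # allocates normally
 */
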
14243/*
14244 * Document-module: ObjectSpace
14245 *
14246 * The ObjectSpace module contains a number of routines
14247 * that interact with the garbage collection facility and allow you to
14248 * traverse all living objects with an iterator.
14249 *
14250 * ObjectSpace also provides support for object finalizers, procs that will be
14251 * called when a specific object is about to be destroyed by garbage
14252 * collection. See the documentation for
14253 * <code>ObjectSpace.define_finalizer</code> for important information on
14254 * how to use this method correctly.
14255 *
14256 * a = "A"
14257 * b = "B"
14258 *
14259 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
14260 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
14261 *
14262 * a = nil
14263 * b = nil
14264 *
14265 * _produces:_
14266 *
14267 * Finalizer two on 537763470
14268 * Finalizer one on 537763480
14269 */
14270
14271/* Document-class: GC::Profiler
14272 *
14273 * The GC profiler provides access to information on GC runs including time,
14274 * length and object space size.
14275 *
14276 * Example:
14277 *
14278 * GC::Profiler.enable
14279 *
14280 * require 'rdoc/rdoc'
14281 *
14282 * GC::Profiler.report
14283 *
14284 * GC::Profiler.disable
14285 *
14286 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
14287 */
14288
14289#include "gc.rbinc"
14290
14291void
14292Init_GC(void)
14293{
14294#undef rb_intern
14295 malloc_offset = gc_compute_malloc_offset();
14296
14297 VALUE rb_mObjSpace;
14298 VALUE rb_mProfiler;
14299 VALUE gc_constants;
14300
14301 rb_mGC = rb_define_module("GC");
14302
14303 gc_constants = rb_hash_new();
14304 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
14305 rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
14306 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
14307 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
14308 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
14309 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
14310 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
14311 rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
14312 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
14313 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
14314 if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
14315 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
14316 }
14317 OBJ_FREEZE(gc_constants);
14318 /* Internal constants in the garbage collector. */
14319 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
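    /* Ruby-level sketch: GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] would report,
     * e.g., 40 on a typical 64-bit build; exact values depend on the build
     * configuration. */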
14320
14321 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
14322 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
14323 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
14324 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
14325 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
14326 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
14327 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
14328 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
14329 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
14330
14331 rb_mObjSpace = rb_define_module("ObjectSpace");
14332
14333 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
14334
14335 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
14336 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
14337
14338 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
14339
14340 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
14341
14342 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
14343 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
14344
14345 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
14346
14347 /* internal methods */
14348 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
14349#if MALLOC_ALLOCATED_SIZE
14350 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
14351 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
14352#endif
14353
14354 if (GC_COMPACTION_SUPPORTED) {
14355 rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
14356 rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
14357 rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
14358 rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
14359 }
14360    else {
14361        rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
14362        rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
14363        rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
14364 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
14365 /* When !GC_COMPACTION_SUPPORTED, this method is not defined in gc.rb */
14366 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
14367 }
14368
14369 if (GC_DEBUG_STRESS_TO_CLASS) {
14370 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
14371 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
14372 }
14373
14374 {
14375 VALUE opts;
14376 /* \GC build options */
14377 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
14378#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
14379 OPT(GC_DEBUG);
14380 OPT(USE_RGENGC);
14381 OPT(RGENGC_DEBUG);
14382 OPT(RGENGC_CHECK_MODE);
14383 OPT(RGENGC_PROFILE);
14384 OPT(RGENGC_ESTIMATE_OLDMALLOC);
14385 OPT(GC_PROFILE_MORE_DETAIL);
14386 OPT(GC_ENABLE_LAZY_SWEEP);
14387 OPT(CALC_EXACT_MALLOC_SIZE);
14388 OPT(MALLOC_ALLOCATED_SIZE);
14389 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
14390 OPT(GC_PROFILE_DETAIL_MEMORY);
14391 OPT(GC_COMPACTION_SUPPORTED);
14392#undef OPT
14393 OBJ_FREEZE(opts);
14394 }
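    /* Ruby-level sketch: GC::OPTS lists the names of the options above that
     * are enabled in this build, e.g. ["USE_RGENGC", "GC_ENABLE_LAZY_SWEEP", ...]. */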
14395}
14396
14397#ifdef ruby_xmalloc
14398#undef ruby_xmalloc
14399#endif
14400#ifdef ruby_xmalloc2
14401#undef ruby_xmalloc2
14402#endif
14403#ifdef ruby_xcalloc
14404#undef ruby_xcalloc
14405#endif
14406#ifdef ruby_xrealloc
14407#undef ruby_xrealloc
14408#endif
14409#ifdef ruby_xrealloc2
14410#undef ruby_xrealloc2
14411#endif
14412
14413void *
14414ruby_xmalloc(size_t size)
14415{
14416#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14417 ruby_malloc_info_file = __FILE__;
14418 ruby_malloc_info_line = __LINE__;
14419#endif
14420 return ruby_xmalloc_body(size);
14421}
14422
14423void *
14424ruby_xmalloc2(size_t n, size_t size)
14425{
14426#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14427 ruby_malloc_info_file = __FILE__;
14428 ruby_malloc_info_line = __LINE__;
14429#endif
14430 return ruby_xmalloc2_body(n, size);
14431}
14432
14433void *
14434ruby_xcalloc(size_t n, size_t size)
14435{
14436#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14437 ruby_malloc_info_file = __FILE__;
14438 ruby_malloc_info_line = __LINE__;
14439#endif
14440 return ruby_xcalloc_body(n, size);
14441}
14442
14443void *
14444ruby_xrealloc(void *ptr, size_t new_size)
14445{
14446#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14447 ruby_malloc_info_file = __FILE__;
14448 ruby_malloc_info_line = __LINE__;
14449#endif
14450 return ruby_xrealloc_body(ptr, new_size);
14451}
14452
14453void *
14454ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
14455{
14456#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14457 ruby_malloc_info_file = __FILE__;
14458 ruby_malloc_info_line = __LINE__;
14459#endif
14460 return ruby_xrealloc2_body(ptr, n, new_size);
14461}