Ruby 3.3.0p0 (2023-12-25 revision 5124f9ac7513eb590c37717337c430cb93caa151)
vm_sync.c
1#include "internal/gc.h"
2#include "internal/thread.h"
3#include "vm_core.h"
4#include "vm_sync.h"
5#include "ractor_core.h"
6#include "vm_debug.h"
7
8void rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr);
9void rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr);
10
11static bool
12vm_locked(rb_vm_t *vm)
13{
14 return vm->ractor.sync.lock_owner == GET_RACTOR();
15}
16
#if RUBY_DEBUG > 0
/* Debug-only check: the current ractor must hold the VM lock.
 * Only enforced once multiple ractors exist; with a single ractor
 * the lock ownership invariant is trivially satisfied. */
void
RUBY_ASSERT_vm_locking(void)
{
    if (!rb_multi_ractor_p()) return;
    VM_ASSERT(vm_locked(GET_VM()));
}

/* Debug-only check: the current ractor must NOT hold the VM lock. */
void
RUBY_ASSERT_vm_unlocking(void)
{
    if (!rb_multi_ractor_p()) return;
    VM_ASSERT(!vm_locked(GET_VM()));
}
#endif
36
37bool
38rb_vm_locked_p(void)
39{
40 return vm_locked(GET_VM());
41}
42
// Acquire the VM-wide lock, or bump its recursion count if already held.
//
// cr:         current ractor; may be NULL on the recursive path (`locked`
//             true), since the owner is already recorded in the vm
// vm:         the VM whose ractor.sync.lock is acquired
// locked:     true when this ractor already holds the lock (recursive enter)
// no_barrier: when true, do not join a pending ractor scheduling barrier
//             before taking the lock
// lev:        out parameter; receives the new recursion level so callers can
//             hand it back to vm_lock_leave() for balance checking
static void
vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);

    if (locked) {
        // Recursive acquisition: just assert ownership; count is bumped below.
        ASSERT_vm_locking();
    }
    else {
#if RACTOR_CHECK_MODE
        // locking ractor and acquire VM lock will cause deadlock
        VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif
        // lock
        rb_native_mutex_lock(&vm->ractor.sync.lock);
        // Fresh acquisition: the lock must have been free.
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        VM_ASSERT(vm->ractor.sync.lock_rec == 0);

#ifdef RUBY_THREAD_PTHREAD_H
        if (!no_barrier &&
            cr->threads.sched.running != NULL // ractor has running threads.
            ) {

            // If a scheduling barrier is in progress, join it (which releases
            // and reacquires the mutex) until no barrier remains pending.
            while (vm->ractor.sched.barrier_waiting) {
                RUBY_DEBUG_LOG("barrier serial:%u", vm->ractor.sched.barrier_serial);
                rb_ractor_sched_barrier_join(vm, cr);
            }
        }
#else
        if (!no_barrier) {
            while (vm->ractor.sync.barrier_waiting) {
                rb_ractor_sched_barrier_join(vm, cr);
            }
        }
#endif

        // Re-check: barrier_join may have released the mutex in between.
        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;
    }

    // Common path: record the new recursion level and report it to the caller.
    vm->ractor.sync.lock_rec++;
    *lev = vm->ractor.sync.lock_rec;

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));
}
90
// Release one level of the VM-wide lock.
//
// lev must match the current recursion level (as returned by the paired
// vm_lock_enter()); it is updated to the new, decremented level. The native
// mutex is only unlocked when the outermost level is released.
static void
vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u%s", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner),
                    vm->ractor.sync.lock_rec == 1 ? " (leave)" : "");

    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec > 0);
    // enter/leave must be balanced: caller's recorded level must be current.
    VM_ASSERT(vm->ractor.sync.lock_rec == *lev);

    vm->ractor.sync.lock_rec--;
    *lev = vm->ractor.sync.lock_rec;

    if (vm->ractor.sync.lock_rec == 0) {
        // Outermost release: clear ownership before unlocking the mutex.
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
    }
}
110
111void
112rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
113{
114 rb_vm_t *vm = GET_VM();
115 if (vm_locked(vm)) {
116 vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
117 }
118 else {
119 vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
120 }
121}
122
123void
124rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
125{
126 rb_vm_t *vm = GET_VM();
127 if (vm_locked(vm)) {
128 vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
129 }
130 else {
131 vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
132 }
133}
134
135void
136rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
137{
138 rb_vm_t *vm = GET_VM();
139 vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
140}
141
142void
143rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
144{
145 vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
146}
147
148void
149rb_vm_lock_body(LOCATION_ARGS)
150{
151 rb_vm_t *vm = GET_VM();
152 ASSERT_vm_unlocking();
153
154 vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
155}
156
157void
158rb_vm_unlock_body(LOCATION_ARGS)
159{
160 rb_vm_t *vm = GET_VM();
161 ASSERT_vm_locking();
162 VM_ASSERT(vm->ractor.sync.lock_rec == 1);
163 vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
164}
165
// Wait on `cond` while holding the VM lock.
//
// The VM-level bookkeeping (lock_rec / lock_owner) is stashed and cleared
// before the wait because the native cond-wait releases the underlying
// mutex, so other ractors may acquire the VM lock in the meantime; the
// bookkeeping is restored once the mutex is reacquired on wakeup.
// msec == 0 means wait indefinitely; otherwise wait at most `msec` ms.
static void
vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    ASSERT_vm_locking();
    unsigned int lock_rec = vm->ractor.sync.lock_rec;
    rb_ractor_t *cr = vm->ractor.sync.lock_owner;

    // Clear ownership: the wait below releases the mutex.
    vm->ractor.sync.lock_rec = 0;
    vm->ractor.sync.lock_owner = NULL;
    if (msec > 0) {
        rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
    }
    else {
        rb_native_cond_wait(cond, &vm->ractor.sync.lock);
    }
    // Mutex reacquired on return from the wait: restore bookkeeping.
    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;
}
184
185void
186rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
187{
188 vm_cond_wait(vm, cond, 0);
189}
190
191void
192rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
193{
194 vm_cond_wait(vm, cond, msec);
195}
196
197void
198rb_vm_barrier(void)
199{
200 RB_DEBUG_COUNTER_INC(vm_sync_barrier);
201
202 if (!rb_multi_ractor_p()) {
203 // no other ractors
204 return;
205 }
206 else {
207 rb_vm_t *vm = GET_VM();
208 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
209 ASSERT_vm_locking();
210 rb_ractor_t *cr = vm->ractor.sync.lock_owner;
211 VM_ASSERT(cr == GET_RACTOR());
212 VM_ASSERT(rb_ractor_status_p(cr, ractor_running));
213
214 rb_ractor_sched_barrier_start(vm, cr);
215 }
216}
217
218void
219rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
220 unsigned int recorded_lock_rec,
221 unsigned int current_lock_rec)
222{
223 VM_ASSERT(recorded_lock_rec != current_lock_rec);
224
225 if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
226 rb_bug("unexpected situation - recordd:%u current:%u",
227 recorded_lock_rec, current_lock_rec);
228 }
229 else {
230 while (recorded_lock_rec < current_lock_rec) {
231 RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
232 }
233 }
234
235 VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
236}
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name for rb_nativethread_lock_lock.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name for rb_nativethread_lock_unlock.
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except that it additionally takes a timeout, in millisecond resolution.