/* Ruby 3.4.9p82 (2026-03-11 revision 76cca827ab52ab1d346a728f068d5b8da3e2952b) -- thread_pthread.c */
/* -*-c-*- */
/**********************************************************************

  thread_pthread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/gc.h"
#include "internal/sanitizers.h"
#include "rjit.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if defined(HAVE_FCNTL_H)
#include <fcntl.h>
#elif defined(HAVE_SYS_FCNTL_H)
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#if defined(__HAIKU__)
#include <kernel/OS.h>
#endif
#ifdef __linux__
#include <sys/syscall.h> /* for SYS_gettid */
#endif
#include <time.h>
#include <signal.h>

#if defined __APPLE__
# include <AvailabilityMacros.h>
#endif

#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
#else
# define USE_EVENTFD (0)
#endif

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
#else
static const void *const condattr_monotonic = NULL;
#endif
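// condattr_monotonic is non-NULL only when pthread condvars can be bound to
// CLOCK_MONOTONIC; rb_native_cond_initialize() passes it to pthread_cond_init()
// so timed waits are not disturbed by wall-clock adjustments. When it is NULL,
// native_cond_timeout() below falls back to the realtime clock.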

#include COROUTINE_H

#ifndef HAVE_SYS_EVENT_H
#define HAVE_SYS_EVENT_H 0
#endif

#ifndef HAVE_SYS_EPOLL_H
#define HAVE_SYS_EPOLL_H 0
#else
// force setting for debug
// #undef HAVE_SYS_EPOLL_H
// #define HAVE_SYS_EPOLL_H 0
#endif

#ifndef USE_MN_THREADS
  #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
    // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
    // with COROUTINE_PTHREAD_CONTEXT, it is not worth using.
    #define USE_MN_THREADS 0
  #elif HAVE_SYS_EPOLL_H
    #include <sys/epoll.h>
    #define USE_MN_THREADS 1
  #elif HAVE_SYS_EVENT_H
    #include <sys/event.h>
    #define USE_MN_THREADS 1
  #else
    #define USE_MN_THREADS 0
  #endif
#endif
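// USE_MN_THREADS selects the backend for M:N thread scheduling: epoll where
// <sys/epoll.h> works, kqueue where <sys/event.h> is available, and disabled
// otherwise (see also ruby_mn_threads_params() below).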

// native thread wrappers

#define NATIVE_MUTEX_LOCK_DEBUG 0
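// Set NATIVE_MUTEX_LOCK_DEBUG to 1 to print every native mutex operation;
// the prints themselves are serialized through the static dbglock below.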

static void
mutex_debug(const char *msg, void *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

void
rb_native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

void
rb_native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

void
rb_native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

void
rb_native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_init(cond, condattr_monotonic);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_destroy(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}

/*
 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
 * EAGAIN after retrying 8192 times. You can see this in the following source:
 *
 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
 *
 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
 * need to retry until the pthread functions stop returning EAGAIN.
 */

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
{
    int r;
    struct timespec ts;

    /*
     * An old Linux may return EINTR even though POSIX says these functions
     * "shall not return an error code of [EINTR]":
     * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
     * Let's hide it from the arch-generic code.
     */
    do {
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}

static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);
    }
    else {
        struct timespec ts;

        rb_timespec_now(&ts);
        return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
    }
}

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
{
    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);
}
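/* Illustrative sketch (not part of this file): a caller that wants to wait up
 * to 100ms on a native condvar would do roughly
 *
 *     rb_native_mutex_lock(&lock);
 *     rb_native_cond_timedwait(&cond, &lock, 100); // timeout in msec
 *     rb_native_mutex_unlock(&lock);
 *
 * where `lock` and `cond` are hypothetical variables initialized with
 * rb_native_mutex_initialize() and rb_native_cond_initialize(). */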

// thread scheduling

static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);

#if 0
static const char *
event_name(rb_event_flag_t event)
{
    switch (event) {
      case RUBY_INTERNAL_THREAD_EVENT_STARTED:
        return "STARTED";
      case RUBY_INTERNAL_THREAD_EVENT_READY:
        return "READY";
      case RUBY_INTERNAL_THREAD_EVENT_RESUMED:
        return "RESUMED";
      case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED:
        return "SUSPENDED";
      case RUBY_INTERNAL_THREAD_EVENT_EXITED:
        return "EXITED";
    }
    return "no-event";
}

#define RB_INTERNAL_THREAD_HOOK(event, th) \
    if (UNLIKELY(rb_internal_thread_event_hooks)) { \
        fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
        rb_thread_execute_hooks(event, th); \
    }
#else
#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
#endif
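// Flip the `#if 0` above to `#if 1` to log every internal thread event with
// its source location in addition to running the registered hooks.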

static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */

#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
# define USE_UBF_LIST 1
#endif

static void threadptr_trap_interrupt(rb_thread_t *);

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);

static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
static void timer_thread_wakeup(void);
static void timer_thread_wakeup_locked(rb_vm_t *vm);
static void timer_thread_wakeup_force(void);
static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
static void coroutine_transfer0(struct coroutine_context *transfer_from,
                                struct coroutine_context *transfer_to, bool to_dead);

#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)

static bool
th_has_dedicated_nt(const rb_thread_t *th)
{
    // TODO: th->has_dedicated_nt
    return th->nt->dedicated > 0;
}

RBIMPL_ATTR_MAYBE_UNUSED()
static void
thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
{
    fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
    rb_thread_t *th;
    int i = 0;
    ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
        i++; if (i>10) rb_bug("too many");
        fprintf(stderr, "  ready:%d (%sNT:%d)\n", th->serial,
                th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
                th->nt ? (int)th->nt->serial : -1);
    }
}

#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)

RBIMPL_ATTR_MAYBE_UNUSED()
static void
ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
{
    rb_ractor_t *r;

    fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);

    int i = 0;
    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        i++;
        if (i>10) rb_bug("!!");
        fprintf(stderr, "  %d ready:%d\n", i, rb_ractor_id(r));
    }
}

#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)

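// thread_sched_lock/unlock forward the caller's file:line into the debug log;
// under VM_CHECK_MODE they additionally record sched->lock_owner so that
// ASSERT_thread_sched_locked() below can verify who holds the scheduler lock.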
static void
thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    rb_native_mutex_lock(&sched->lock_);

#if VM_CHECK_MODE
    RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
    VM_ASSERT(sched->lock_owner == NULL);
    sched->lock_owner = th;
#else
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
#endif
}

static void
thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));

#if VM_CHECK_MODE
    VM_ASSERT(sched->lock_owner == th);
    sched->lock_owner = NULL;
#endif

    rb_native_mutex_unlock(&sched->lock_);
}

static void
thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

#if VM_CHECK_MODE > 0
    sched->lock_owner = th;
#endif
}

static void
ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
{
    VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);

#if VM_CHECK_MODE
    if (th) {
        VM_ASSERT(sched->lock_owner == th);
    }
    else {
        VM_ASSERT(sched->lock_owner != NULL);
    }
#endif
}

#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)

RBIMPL_ATTR_MAYBE_UNUSED()
static unsigned int
rb_ractor_serial(const rb_ractor_t *r) {
    if (r) {
        return rb_ractor_id(r);
    }
    else {
        return 0;
    }
}

static void
ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
    VM_ASSERT(vm->ractor.sched.locked == false);

    vm->ractor.sched.lock_owner = cr;
    vm->ractor.sched.locked = true;
#endif
}

static void
ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(vm->ractor.sched.lock_owner == cr);

    vm->ractor.sched.locked = false;
    vm->ractor.sched.lock_owner = NULL;
#endif
}

static void
ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    rb_native_mutex_lock(&vm->ractor.sched.lock);

#if VM_CHECK_MODE
    RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
#else
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
#endif

    ractor_sched_set_locked(vm, cr);
}

static void
ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));

    ractor_sched_set_unlocked(vm, cr);
    rb_native_mutex_unlock(&vm->ractor.sched.lock);
}

static void
ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
}

RBIMPL_ATTR_MAYBE_UNUSED()
static bool
ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
{
    rb_thread_t *rth;
    ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
        if (rth == th) return true;
    }
    return false;
}

RBIMPL_ATTR_MAYBE_UNUSED()
static unsigned int
ractor_sched_running_threads_size(rb_vm_t *vm)
{
    rb_thread_t *th;
    unsigned int i = 0;
    ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
        i++;
    }
    return i;
}

RBIMPL_ATTR_MAYBE_UNUSED()
static unsigned int
ractor_sched_timeslice_threads_size(rb_vm_t *vm)
{
    rb_thread_t *th;
    unsigned int i = 0;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
        i++;
    }
    return i;
}

RBIMPL_ATTR_MAYBE_UNUSED()
static bool
ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
{
    rb_thread_t *rth;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
        if (rth == th) return true;
    }
    return false;
}

static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);

// Update the bookkeeping of running/timeslice threads; the timer thread
// delivers timeslice signals to the threads registered in the timeslice set.
static void
thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
                                   rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
{
#if USE_RUBY_DEBUG_LOG
    unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
#endif

    rb_thread_t *del_timeslice_th;

    if (del_th && sched->is_running_timeslice) {
        del_timeslice_th = del_th;
        sched->is_running_timeslice = false;
    }
    else {
        del_timeslice_th = NULL;
    }

    RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
                   rb_th_serial(add_th), rb_th_serial(del_th),
                   rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));

    ractor_sched_lock(vm, cr);
    {
        // update running_threads
        if (del_th) {
            VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
            VM_ASSERT(del_timeslice_th != NULL ||
                      !ractor_sched_timeslice_threads_contain_p(vm, del_th));

            ccan_list_del_init(&del_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt--;

            if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                ractor_sched_barrier_join_signal_locked(vm);
            }
            sched->is_running = false;
        }

        if (add_th) {
            while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                RUBY_DEBUG_LOG("barrier-wait");

                ractor_sched_barrier_join_signal_locked(vm);
                ractor_sched_barrier_join_wait_locked(vm, add_th);
            }

            VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));

            ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt++;
            sched->is_running = true;
            VM_ASSERT(!vm->ractor.sched.barrier_waiting);
        }

        if (add_timeslice_th) {
            // update timeslice threads
            int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
            ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
            sched->is_running_timeslice = true;
            if (was_empty) {
                timer_thread_wakeup_locked(vm);
            }
        }

        if (del_timeslice_th) {
            VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
            ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
        }

        VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
        VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
    }
    ractor_sched_unlock(vm, cr);

    if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
        // this can happen right after barrier synchronization was started by
        // another ractor; take and release the VM lock to wait for it
        rb_thread_t *lock_owner = NULL;
#if VM_CHECK_MODE
        lock_owner = sched->lock_owner;
#endif
        thread_sched_unlock(sched, lock_owner);
        {
            RB_VM_LOCK_ENTER();
            RB_VM_LOCK_LEAVE();
        }
        thread_sched_lock(sched, lock_owner);
    }

    //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
    //               rb_th_serial(add_th), rb_th_serial(del_th),
    //               rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
    RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
}

static void
thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(sched->running == th);

    rb_vm_t *vm = th->vm;
    thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
}

static void
thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ASSERT_thread_sched_locked(sched, th);

    rb_vm_t *vm = th->vm;
    thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
}

void
rb_add_running_thread(rb_thread_t *th)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        thread_sched_add_running_thread(sched, th);
    }
    thread_sched_unlock(sched, th);
}

void
rb_del_running_thread(rb_thread_t *th)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        thread_sched_del_running_thread(sched, th);
    }
    thread_sched_unlock(sched, th);
}

// Set the current or next running thread.
// sched->running should be set only in this function.
//
// If th is NULL, there are no running threads.
static void
thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
    VM_ASSERT(sched->running != th);

    sched->running = th;
}
702static bool
703thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
704{
705 rb_thread_t *rth;
706 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
707 if (rth == th) return true;
708 }
709 return false;
710}
711
// Dequeue a thread from the ready queue.
// If the ready queue is empty, return NULL.
//
// Returns the dequeued thread (or NULL).
static rb_thread_t *
thread_sched_deq(struct rb_thread_sched *sched)
{
    ASSERT_thread_sched_locked(sched, NULL);
    rb_thread_t *next_th;

    VM_ASSERT(sched->running != NULL);

    if (ccan_list_empty(&sched->readyq)) {
        next_th = NULL;
    }
    else {
        next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);

        VM_ASSERT(sched->readyq_cnt > 0);
        sched->readyq_cnt--;
        ccan_list_node_init(&next_th->sched.node.readyq);
    }

    RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);

    return next_th;
}

// Enqueue a ready thread onto the ready queue.
static void
thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
{
    ASSERT_thread_sched_locked(sched, NULL);
    RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));

    if (sched->is_running) {
        if (ccan_list_empty(&sched->readyq)) {
            // add sched->running to timeslice
            thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
        }
    }
    else {
        VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
    }

    ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
    sched->readyq_cnt++;
}

// DNT: kick condvar
// SNT: TODO
static void
thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
{
    ASSERT_thread_sched_locked(sched, NULL);
    VM_ASSERT(sched->running == next_th);

    if (next_th) {
        if (next_th->nt) {
            if (th_has_dedicated_nt(next_th)) {
                RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
                rb_native_cond_signal(&next_th->nt->cond.readyq);
            }
            else {
                // TODO
                RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
            }
        }
        else {
            if (will_switch) {
                RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
            }
            else {
                RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
                ractor_sched_enq(next_th->vm, next_th->ractor);
            }
        }
    }
    else {
        RUBY_DEBUG_LOG("no waiting threads%s", "");
    }
}

// waiting -> ready (locked)
static void
thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
{
    RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);

    VM_ASSERT(sched->running != th);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);

    if (sched->running == NULL) {
        thread_sched_set_running(sched, th);
        if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
    }
    else {
        thread_sched_enq(sched, th);
    }
}

// waiting -> ready
//
// `th` was put into the "waiting" state by `thread_sched_to_waiting`,
// and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
RBIMPL_ATTR_MAYBE_UNUSED()
static void
thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
    {
        thread_sched_to_ready_common(sched, th, true, false);
    }
    thread_sched_unlock(sched, th);
}

// wait until sched->running is `th`.
static void
thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));

    if (th != sched->running) {
        // already deleted from running threads
        // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking

        // wait for execution right
        rb_thread_t *next_th;
        while((next_th = sched->running) != th) {
            if (th_has_dedicated_nt(th)) {
                RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));

                thread_sched_set_lock_owner(sched, NULL);
                {
                    RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
                    rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
                }
                thread_sched_set_lock_owner(sched, th);

                RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
                if (th == sched->running) {
                    rb_ractor_thread_switch(th->ractor, th);
                }
            }
            else {
                // search another ready thread
                if (can_direct_transfer &&
                    (next_th = sched->running) != NULL &&
                    !next_th->nt // next_th is running or has dedicated nt
                    ) {

                    RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    {
                        rb_ractor_set_current_ec(th->ractor, NULL);
                        thread_sched_switch(th, next_th);
                    }
                    thread_sched_set_lock_owner(sched, th);
                }
                else {
                    // search another ready ractor
                    struct rb_native_thread *nt = th->nt;
                    native_thread_assign(NULL, th);

                    RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    {
                        rb_ractor_set_current_ec(th->ractor, NULL);
                        coroutine_transfer0(th->sched.context, nt->nt_context, false);
                    }
                    thread_sched_set_lock_owner(sched, th);
                }

                VM_ASSERT(rb_current_ec_noinline() == th->ec);
            }
        }

        VM_ASSERT(th->nt != NULL);
        VM_ASSERT(rb_current_ec_noinline() == th->ec);
        VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

        // add th to running threads
        thread_sched_add_running_thread(sched, th);
    }

    // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
}

// waiting -> ready -> running (locked)
static void
thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));

    VM_ASSERT(sched->running != th);
    VM_ASSERT(th_has_dedicated_nt(th));
    VM_ASSERT(GET_THREAD() == th);

    native_thread_dedicated_dec(th->vm, th->ractor, th->nt);

    // waiting -> ready
    thread_sched_to_ready_common(sched, th, false, false);

    if (sched->running == th) {
        thread_sched_add_running_thread(sched, th);
    }

    // TODO: check SNT number
    thread_sched_wait_running_turn(sched, th, false);
}

// waiting -> ready -> running
//
// `th` had been waiting via `thread_sched_to_waiting()`
// and ran a dedicated task (like waitpid and so on).
// After the dedicated task, this function is called
// to rejoin normal thread scheduling.
static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_running_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}

// Resume the next thread in the thread ready queue.
//
// Dequeue the next running thread from the ready queue and
// resume it if available.
//
// If the next thread has a dedicated native thread, simply signal it to resume.
// Otherwise, make the ractor ready and another nt will run the ractor and the thread.
static void
thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
{
    ASSERT_thread_sched_locked(sched, th);

    VM_ASSERT(sched->running == th);
    VM_ASSERT(sched->running->nt != NULL);

    rb_thread_t *next_th = thread_sched_deq(sched);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
    VM_ASSERT(th != next_th);

    thread_sched_set_running(sched, next_th);
    VM_ASSERT(next_th == sched->running);
    thread_sched_wakeup_running_thread(sched, next_th, will_switch);

    if (th != next_th) {
        thread_sched_del_running_thread(sched, th);
    }
}

// running -> waiting
//
// to_dead: false
//   th will run a dedicated task.
//   run another ready thread.
// to_dead: true
//   th will be dead.
//   run another ready thread.
static void
thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
{
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

    if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);

    RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));

    bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
    thread_sched_wakeup_next_thread(sched, th, can_switch);
}

// running -> dead (locked)
static void
thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, true);
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
}

// running -> dead
static void
thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_dead_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}

// running -> waiting (locked)
//
// This thread will run a dedicated task (th->nt->dedicated++).
static void
thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, false);
}

// running -> waiting
//
// This thread will run a dedicated task.
static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_waiting_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}

// mini utility func
static void
setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    {
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

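// ubf_waiting is the unblock function registered while a thread is parked in
// thread_sched_to_waiting_until_wakeup(); it re-enqueues the thread onto the
// ready queue so an interrupt can wake the sleeping thread.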
static void
ubf_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    // only once. it is safe because th->interrupt_lock is already acquired.
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
    {
        if (sched->running == th) {
            // not sleeping yet.
        }
        else {
            thread_sched_to_ready_common(sched, th, true, false);
        }
    }
    thread_sched_unlock(sched, th);
}

// running -> waiting
//
// This thread will sleep until another thread wakes it up.
static void
thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    RB_VM_SAVE_MACHINE_CONTEXT(th);
    setup_ubf(th, ubf_waiting, (void *)th);

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

    thread_sched_lock(sched, th);
    {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
        }
        else {
            RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
        }
    }
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);
}

// run another thread in the ready queue.
// continue to run if there are no ready threads.
static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);

    thread_sched_lock(sched, th);
    {
        if (!ccan_list_empty(&sched->readyq)) {
            RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
            thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
        }
        else {
            VM_ASSERT(sched->readyq_cnt == 0);
        }
    }
    thread_sched_unlock(sched, th);
}

void
rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
{
    rb_native_mutex_initialize(&sched->lock_);

#if VM_CHECK_MODE
    sched->lock_owner = NULL;
#endif

    ccan_list_head_init(&sched->readyq);
    sched->readyq_cnt = 0;

#if USE_MN_THREADS
    if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
#endif
}

static void
coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
{
#ifdef RUBY_ASAN_ENABLED
    void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
    __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
#endif

    RBIMPL_ATTR_MAYBE_UNUSED()
    struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);

    /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
     * never be resumed! */
    VM_ASSERT(!to_dead);
#ifdef RUBY_ASAN_ENABLED
    __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
                                    (const void**)&returning_from->stack_base, &returning_from->stack_size);
#endif

}

static void
thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
{
    VM_ASSERT(!nt->dedicated);
    VM_ASSERT(next_th->nt == NULL);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));

    ruby_thread_set_native(next_th);
    native_thread_assign(nt, next_th);

    coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
}

static void
thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
{
    struct rb_native_thread *nt = cth->nt;
    native_thread_assign(NULL, cth);
    RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
    thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
}

#if VM_CHECK_MODE > 0
RBIMPL_ATTR_MAYBE_UNUSED()
static unsigned int
grq_size(rb_vm_t *vm, rb_ractor_t *cr)
{
    ASSERT_ractor_sched_locked(vm, cr);

    rb_ractor_t *r, *prev_r = NULL;
    unsigned int i = 0;

    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        i++;

        VM_ASSERT(r != prev_r);
        prev_r = r;
    }
    return i;
}
#endif

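// Push ractor r onto the global run queue (GRQ) and signal one idle shared
// native thread (SNT) waiting in ractor_sched_deq(). Called when r has a
// runnable thread (sched->running) but no native thread assigned to it yet.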
static void
ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
{
    struct rb_thread_sched *sched = &r->threads.sched;
    rb_ractor_t *cr = NULL; // timer thread can call this function

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(sched->running->nt == NULL);

    ractor_sched_lock(vm, cr);
    {
#if VM_CHECK_MODE > 0
        // check if grq contains r
        rb_ractor_t *tr;
        ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
            VM_ASSERT(r != tr);
        }
#endif

        ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
        vm->ractor.sched.grq_cnt++;
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);

        rb_native_cond_signal(&vm->ractor.sched.cond);

        // ractor_sched_dump(vm);
    }
    ractor_sched_unlock(vm, cr);
}


#ifndef SNT_KEEP_SECONDS
#define SNT_KEEP_SECONDS 0
#endif

#ifndef MINIMUM_SNT
// make at least MINIMUM_SNT snts for debug.
#define MINIMUM_SNT 0
#endif

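// An SNT picks up runnable ractors from the GRQ here. With SNT_KEEP_SECONDS > 0
// an idle SNT gives up and exits after that many seconds without work; with the
// default of 0 it blocks on the condvar indefinitely.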
static rb_ractor_t *
ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
{
    rb_ractor_t *r;

    ractor_sched_lock(vm, cr);
    {
        RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
        // ractor_sched_dump(vm);

        VM_ASSERT(rb_current_execution_context(false) == NULL);
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
            RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);

#if SNT_KEEP_SECONDS > 0
            rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
            if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
                RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
                VM_ASSERT(r == NULL);
                vm->ractor.sched.snt_cnt--;
                vm->ractor.sched.running_cnt--;
                break;
            }
            else {
                RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
            }
#else
            ractor_sched_set_unlocked(vm, cr);
            rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
            ractor_sched_set_locked(vm, cr);

            RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
#endif
        }

        VM_ASSERT(rb_current_execution_context(false) == NULL);

        if (r) {
            VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
            vm->ractor.sched.grq_cnt--;
            RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
        }
        else {
            VM_ASSERT(SNT_KEEP_SECONDS > 0);
            // timeout
        }
    }
    ractor_sched_unlock(vm, cr);

    return r;
}

void rb_ractor_lock_self(rb_ractor_t *r);
void rb_ractor_unlock_self(rb_ractor_t *r);

void
rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
{
    // the ractor lock of cr is acquired
    // cr is in the sleeping status
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
    struct rb_thread_sched *sched = TH_SCHED(th);
    cr->sync.wait.waiting_thread = th; // TODO: multi-thread

    setup_ubf(th, ubf, (void *)cr);

    thread_sched_lock(sched, th);
    {
        rb_ractor_unlock_self(cr);
        {
            if (RUBY_VM_INTERRUPTED(th->ec)) {
                RUBY_DEBUG_LOG("interrupted");
            }
            else if (cr->sync.wait.wakeup_status != wakeup_none) {
                RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
            }
            else {
                // sleep
                RB_VM_SAVE_MACHINE_CONTEXT(th);
                th->status = THREAD_STOPPED_FOREVER;

                RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

                bool can_direct_transfer = !th_has_dedicated_nt(th);
                thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
                thread_sched_wait_running_turn(sched, th, can_direct_transfer);
                th->status = THREAD_RUNNABLE;
                // wakeup
            }
        }
    }
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);

    rb_ractor_lock_self(cr);
    cr->sync.wait.waiting_thread = NULL;
}

void
rb_ractor_sched_wakeup(rb_ractor_t *r)
{
    rb_thread_t *r_th = r->sync.wait.waiting_thread;
    // ractor lock of r is acquired
    struct rb_thread_sched *sched = TH_SCHED(r_th);

    VM_ASSERT(r->sync.wait.wakeup_status != 0);

    thread_sched_lock(sched, r_th);
    {
        if (r_th->status == THREAD_STOPPED_FOREVER) {
            thread_sched_to_ready_common(sched, r_th, true, false);
        }
    }
    thread_sched_unlock(sched, r_th);
}

static bool
ractor_sched_barrier_completed_p(rb_vm_t *vm)
{
    RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
    VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
    return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
}

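// Barrier protocol: the initiating ractor sets barrier_waiting, interrupts all
// other running threads, and sleeps on barrier_complete_cond until it is the
// only runner left. Joining threads bump barrier_waiting_cnt and sleep on
// barrier_release_cond until the initiator increments barrier_serial.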
void
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
    VM_ASSERT(!vm->ractor.sched.barrier_waiting);
    VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);

    RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);

    unsigned int lock_rec;

    ractor_sched_lock(vm, cr);
    {
        vm->ractor.sched.barrier_waiting = true;

        // release VM lock
        lock_rec = vm->ractor.sync.lock_rec;
        vm->ractor.sync.lock_rec = 0;
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
        {
            // interrupts all running threads
            rb_thread_t *ith;
            ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
                if (ith->ractor != cr) {
                    RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
                    RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
                }
            }

            // wait for other ractors
            while (!ractor_sched_barrier_completed_p(vm)) {
                ractor_sched_set_unlocked(vm, cr);
                rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
                ractor_sched_set_locked(vm, cr);
            }
        }
    }
    ractor_sched_unlock(vm, cr);

    // acquire VM lock
    rb_native_mutex_lock(&vm->ractor.sync.lock);
    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;

    RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);

    ractor_sched_lock(vm, cr);
    {
        vm->ractor.sched.barrier_waiting = false;
        vm->ractor.sched.barrier_serial++;
        vm->ractor.sched.barrier_waiting_cnt = 0;
        rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
    }
    ractor_sched_unlock(vm, cr);
}

static void
ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
{
    if (ractor_sched_barrier_completed_p(vm)) {
        rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
    }
}

static void
ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
{
    VM_ASSERT(vm->ractor.sched.barrier_waiting);

    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;

    while (vm->ractor.sched.barrier_serial == barrier_serial) {
        RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
        RB_VM_SAVE_MACHINE_CONTEXT(th);

        rb_ractor_t *cr = th->ractor;
        ractor_sched_set_unlocked(vm, cr);
        rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
        ractor_sched_set_locked(vm, cr);

        RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
    }
}

void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
    VM_ASSERT(vm->ractor.sched.barrier_waiting);  // VM needs barrier sync

#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
#endif

    RUBY_DEBUG_LOG("join");

    rb_native_mutex_unlock(&vm->ractor.sync.lock);
    {
        VM_ASSERT(vm->ractor.sched.barrier_waiting);  // VM needs barrier sync
        VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);

        ractor_sched_lock(vm, cr);
        {
            // running_cnt
            vm->ractor.sched.barrier_waiting_cnt++;
            RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);

            ractor_sched_barrier_join_signal_locked(vm);
            ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
        }
        ractor_sched_unlock(vm, cr);
    }

    rb_native_mutex_lock(&vm->ractor.sync.lock);
    // VM locked here
}

#if 0
// TODO

static void clear_thread_cache_altstack(void);

static void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    /*
     * only called once at VM shutdown (not atfork), another thread
     * may still grab vm->gvl.lock when calling gvl_release at
     * the end of thread_start_func_2
     */
    if (0) {
        rb_native_mutex_destroy(&sched->lock);
    }
    clear_thread_cache_altstack();
}
#endif

#ifdef RB_THREAD_T_HAS_NATIVE_ID
static int
get_native_thread_id(void)
{
#ifdef __linux__
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();
#endif
}
#endif

#if defined(HAVE_WORKING_FORK)
static void
thread_sched_atfork(struct rb_thread_sched *sched)
{
    current_fork_gen++;
    rb_thread_sched_init(sched, true);
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = GET_VM();

    if (th_has_dedicated_nt(th)) {
        vm->ractor.sched.snt_cnt = 0;
    }
    else {
        vm->ractor.sched.snt_cnt = 1;
    }
    vm->ractor.sched.running_cnt = 0;

    rb_native_mutex_initialize(&vm->ractor.sched.lock);
#if VM_CHECK_MODE > 0
    vm->ractor.sched.lock_owner = NULL;
    vm->ractor.sched.locked = false;
#endif

    // rb_native_cond_destroy(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    VM_ASSERT(sched->is_running);
    sched->is_running_timeslice = false;

    if (sched->running != th) {
        thread_sched_to_running(sched, th);
    }
    else {
        thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
    }

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    if (th->nt) {
        th->nt->tid = get_native_thread_id();
    }
#endif
}

#endif

#ifdef RB_THREAD_LOCAL_SPECIFIER
static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
#else
static pthread_key_t ruby_native_thread_key;
#endif

static void
null_func(int i)
{
    /* null */
    // This function can be called from signal handler
    // RUBY_DEBUG_LOG("i:%d", i);
}

rb_thread_t *
ruby_thread_from_native(void)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
#else
    return pthread_getspecific(ruby_native_thread_key);
#endif
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th) {
#ifdef USE_UBF_LIST
        ccan_list_node_init(&th->sched.node.ubf);
#endif
    }

    // setup TLS

    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return 1;
#else
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
#endif
}

static void native_thread_setup(struct rb_native_thread *nt);
static void native_thread_setup_on_thread(struct rb_native_thread *nt);

void
Init_native_thread(rb_thread_t *main_th)
{
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
        if (r == 0) {
            r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
        }
        if (r) condattr_monotonic = NULL;
    }
#endif

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    }
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");
    }
#endif
    ruby_posix_signal(SIGVTALRM, null_func);

    // setup vm
    rb_vm_t *vm = main_th->vm;
    rb_native_mutex_initialize(&vm->ractor.sched.lock);
    rb_native_cond_initialize(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    // setup main thread
    main_th->nt->thread_id = pthread_self();
    main_th->nt->serial = 1;
#ifdef RUBY_NT_SERIAL
    ruby_nt_serial = 1;
#endif
    ruby_thread_set_native(main_th);
    native_thread_setup(main_th->nt);
    native_thread_setup_on_thread(main_th->nt);

    TH_SCHED(main_th)->running = main_th;
    main_th->has_dedicated_nt = 1;

    thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);

    // setup main NT
    main_th->nt->dedicated = 1;
    main_th->nt->vm = vm;

    // setup mn
    vm->ractor.sched.dnt_cnt = 1;
}

extern int ruby_mn_threads_enabled;

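// M:N scheduling is configured via environment variables (read below). For
// example, on builds where USE_MN_THREADS is 1,
// `RUBY_MN_THREADS=1 RUBY_MAX_CPU=4 ruby app.rb` enables M:N for the main
// Ractor and caps the scheduler at 4 native threads.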
void
ruby_mn_threads_params(void)
{
    rb_vm_t *vm = GET_VM();
    rb_ractor_t *main_ractor = GET_RACTOR();

    const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
    bool enable_mn_threads = false;

    if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
        // enabled
        ruby_mn_threads_enabled = 1;
    }
    main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;

    const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
    const int default_max_cpu = 8; // TODO: CPU num?
    int max_cpu = default_max_cpu;

    if (USE_MN_THREADS && max_cpu_cstr) {
        int given_max_cpu = atoi(max_cpu_cstr);
        if (given_max_cpu > 0) {
            max_cpu = given_max_cpu;
        }
    }

    vm->ractor.sched.max_cpu = max_cpu;
}

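// A native thread is "dedicated" (a DNT) while it must stay bound to a single
// Ruby thread, e.g. while that thread runs a blocking dedicated task;
// dedicated == 0 means it can be shared among threads (an SNT). These helpers
// keep vm->ractor.sched.snt_cnt/dnt_cnt consistent under the ractor-sched lock.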
static void
native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
{
    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
        {
            vm->ractor.sched.snt_cnt--;
            vm->ractor.sched.dnt_cnt++;
        }
        ractor_sched_unlock(vm, cr);
    }

    nt->dedicated++;
}

static void
native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
{
    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
    VM_ASSERT(nt->dedicated > 0);
    nt->dedicated--;

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
        {
            nt->vm->ractor.sched.snt_cnt++;
            nt->vm->ractor.sched.dnt_cnt--;
        }
        ractor_sched_unlock(vm, cr);
    }
}

static void
native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
{
#if USE_RUBY_DEBUG_LOG
    if (nt) {
        if (th->nt) {
            RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
        }
        else {
            RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
        }
    }
    else {
        if (th->nt) {
            RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
        }
        else {
            RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
        }
    }
#endif

    th->nt = nt;
}

static void
native_thread_destroy(struct rb_native_thread *nt)
{
    if (nt) {
        rb_native_cond_destroy(&nt->cond.readyq);

        if (&nt->cond.readyq != &nt->cond.intr) {
            rb_native_cond_destroy(&nt->cond.intr);
        }

        RB_ALTSTACK_FREE(nt->altstack);
        ruby_xfree(nt->nt_context);
        ruby_xfree(nt);
    }
}

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of current thread's stack
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
# else
    guard = getpagesize();
# endif
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz=sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    /* Must not use thinfo.__pi_stacksize for size.
       It is around 3KB smaller than the correct size
       calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    thread_info info;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif

static struct {
    rb_nativethread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
    RUBY_STACK_SPACE_RATIO = 5
};

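// Reserve a stack-overflow margin: 1/RUBY_STACK_SPACE_RATIO of the stack
// (20% with the default ratio of 5), capped at RUBY_STACK_SPACE_LIMIT (1MB).
// For example, a 1MB stack gets a ~200KB margin, while an 8MB stack is capped
// at the 1MB limit.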
static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}
1933
1934static void
1935native_thread_init_main_thread_stack(void *addr)
1936{
1937 native_main_thread.id = pthread_self();
1938#ifdef RUBY_ASAN_ENABLED
1939 addr = asan_get_real_stack_addr((void *)addr);
1940#endif
1941
1942#if MAINSTACKADDR_AVAILABLE
1943 if (native_main_thread.stack_maxsize) return;
1944 {
1945 void* stackaddr;
1946 size_t size;
1947 if (get_main_stack(&stackaddr, &size) == 0) {
1948 native_main_thread.stack_maxsize = size;
1949 native_main_thread.stack_start = stackaddr;
1950 goto bound_check;
1951 }
1952 }
1953#endif
1954#ifdef STACK_END_ADDRESS
1955 native_main_thread.stack_start = STACK_END_ADDRESS;
1956#else
1957 if (!native_main_thread.stack_start ||
1958 STACK_UPPER((VALUE *)(void *)&addr,
1959 native_main_thread.stack_start > (VALUE *)addr,
1960 native_main_thread.stack_start < (VALUE *)addr)) {
1961 native_main_thread.stack_start = (VALUE *)addr;
1962 }
1963#endif
1964 {
1965#if defined(HAVE_GETRLIMIT)
1966#if defined(PTHREAD_STACK_DEFAULT)
1967# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1968# error "PTHREAD_STACK_DEFAULT is too small"
1969# endif
1970 size_t size = PTHREAD_STACK_DEFAULT;
1971#else
1972 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
1973#endif
1974 size_t space;
1975 int pagesize = getpagesize();
1976 struct rlimit rlim;
1977 STACK_GROW_DIR_DETECTION;
1978 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
1979 size = (size_t)rlim.rlim_cur;
1980 }
1981 addr = native_main_thread.stack_start;
1982 if (IS_STACK_DIR_UPPER()) {
1983 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
1984 }
1985 else {
1986 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
1987 }
1988 native_main_thread.stack_maxsize = space;
1989#endif
1990 }
1991
1992#if MAINSTACKADDR_AVAILABLE
1993 bound_check:
1994#endif
1995 /* If addr is outside the estimated main-thread stack range, */
1996 /* it should be on a coroutine (alternative) stack. [Feature #2294] */
1997 {
1998 void *start, *end;
1999 STACK_GROW_DIR_DETECTION;
2000
2001 if (IS_STACK_DIR_UPPER()) {
2002 start = native_main_thread.stack_start;
2003 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2004 }
2005 else {
2006 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2007 end = native_main_thread.stack_start;
2008 }
2009
2010 if ((void *)addr < start || (void *)addr > end) {
2011 /* out of range */
2012 native_main_thread.stack_start = (VALUE *)addr;
2013 native_main_thread.stack_maxsize = 0; /* unknown */
2014 }
2015 }
2016}
2017
2018#define CHECK_ERR(expr) \
2019 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2020
2021static int
2022native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
2023{
2024 rb_nativethread_id_t curr = pthread_self();
2025#ifdef RUBY_ASAN_ENABLED
2026 local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
2027 th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
2028#endif
2029
2030 if (!native_main_thread.id) {
2031 /* This thread is the first thread and must be the main thread;
2032 * configure the native_main_thread object */
2033 native_thread_init_main_thread_stack(local_in_parent_frame);
2034 }
2035
2036 if (pthread_equal(curr, native_main_thread.id)) {
2037 th->ec->machine.stack_start = native_main_thread.stack_start;
2038 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2039 }
2040 else {
2041#ifdef STACKADDR_AVAILABLE
2042 if (th_has_dedicated_nt(th)) {
2043 void *start;
2044 size_t size;
2045
2046 if (get_stack(&start, &size) == 0) {
2047 uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
2048 th->ec->machine.stack_start = local_in_parent_frame;
2049 th->ec->machine.stack_maxsize = size - diff;
2050 }
2051 }
2052#else
2053 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
2054#endif
2055 }
2056
2057 return 0;
2058}
2059
2060struct nt_param {
2061 rb_vm_t *vm;
2062 struct rb_native_thread *nt;
2063};
2064
2065static void *
2066nt_start(void *ptr);
2067
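/* Create the underlying pthread for 'nt': detached, with the configured
 * machine stack size, of which space_size() bytes are kept as a margin. */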
2068static int
2069native_thread_create0(struct rb_native_thread *nt)
2070{
2071 int err = 0;
2072 pthread_attr_t attr;
2073
2074 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2075 const size_t space = space_size(stack_size);
2076
2077 nt->machine_stack_maxsize = stack_size - space;
2078
2079#ifdef USE_SIGALTSTACK
2080 nt->altstack = rb_allocate_sigaltstack();
2081#endif
2082
2083 CHECK_ERR(pthread_attr_init(&attr));
2084
2085# ifdef PTHREAD_STACK_MIN
2086 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2087 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2088# endif
2089
2090# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2091 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2092# endif
2093 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2094
2095 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2096
2097 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2098
2099 CHECK_ERR(pthread_attr_destroy(&attr));
2100
2101 return err;
2102}
2103
2104static void
2105native_thread_setup(struct rb_native_thread *nt)
2106{
2107 // init cond
2108 rb_native_cond_initialize(&nt->cond.readyq);
2109
2110 if (&nt->cond.readyq != &nt->cond.intr) {
2111 rb_native_cond_initialize(&nt->cond.intr);
2112 }
2113}
2114
2115static void
2116native_thread_setup_on_thread(struct rb_native_thread *nt)
2117{
2118 // init tid
2119#ifdef RB_THREAD_T_HAS_NATIVE_ID
2120 nt->tid = get_native_thread_id();
2121#endif
2122
2123 // init signal handler
2124 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2125}
2126
2127static struct rb_native_thread *
2128native_thread_alloc(void)
2129{
2130 struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
2131 native_thread_setup(nt);
2132
2133#if USE_MN_THREADS
2134 nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
2135#endif
2136
2137#if USE_RUBY_DEBUG_LOG
2138 static rb_atomic_t nt_serial = 2;
2139 nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
2140#endif
2141 return nt;
2142}
2143
2144static int
2145native_thread_create_dedicated(rb_thread_t *th)
2146{
2147 th->nt = native_thread_alloc();
2148 th->nt->vm = th->vm;
2149 th->nt->running_thread = th;
2150 th->nt->dedicated = 1;
2151
2152 // vm stack
2153 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2154 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2155 th->sched.malloc_stack = true;
2156 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2157 th->sched.context_stack = vm_stack;
2158
2159
2160 int err = native_thread_create0(th->nt);
2161 if (!err) {
2162 // setup
2163 thread_sched_to_ready(TH_SCHED(th), th);
2164 }
2165 return err;
2166}
2167
2168static void
2169call_thread_start_func_2(rb_thread_t *th)
2170{
2171 /* Capture the address of a local in this stack frame to mark the beginning of the
2172 machine stack for this thread. This is required even if we can tell the real
2173 stack beginning from the pthread API in native_thread_init_stack, because
2174 glibc stores some of its own data on the stack before calling into user code
2175 on a new thread, and replacing that data on fiber-switch would break it (see
2176 bug #13887) */
2177 VALUE stack_start = 0;
2178 VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);
2179
2180 native_thread_init_stack(th, stack_start_addr);
2181 thread_start_func_2(th, th->ec->machine.stack_start);
2182}
2183
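/* Entry point of a native thread. A dedicated NT (DNT) waits for its Ruby
 * thread's running turn and then runs it to completion. A shared NT (SNT)
 * loops, dequeueing a ractor from the global run queue and switching to that
 * ractor's runnable thread; it exits when the dequeue times out. */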
2184static void *
2185nt_start(void *ptr)
2186{
2187 struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
2188 rb_vm_t *vm = nt->vm;
2189
2190 native_thread_setup_on_thread(nt);
2191
2192 // init tid
2193#ifdef RB_THREAD_T_HAS_NATIVE_ID
2194 nt->tid = get_native_thread_id();
2195#endif
2196
2197#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2198 ruby_nt_serial = nt->serial;
2199#endif
2200
2201 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2202
2203 if (!nt->dedicated) {
2204 coroutine_initialize_main(nt->nt_context);
2205 }
2206
2207 while (1) {
2208 if (nt->dedicated) {
2209 // wait running turn
2210 rb_thread_t *th = nt->running_thread;
2211 struct rb_thread_sched *sched = TH_SCHED(th);
2212
2213 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2214 ruby_thread_set_native(th);
2215
2216 thread_sched_lock(sched, th);
2217 {
2218 if (sched->running == th) {
2219 thread_sched_add_running_thread(sched, th);
2220 }
2221 thread_sched_wait_running_turn(sched, th, false);
2222 }
2223 thread_sched_unlock(sched, th);
2224
2225 // start threads
2226 call_thread_start_func_2(th);
2227 break; // TODO: allow changing to an SNT
2228 }
2229 else {
2230 RUBY_DEBUG_LOG("check next");
2231 rb_ractor_t *r = ractor_sched_deq(vm, NULL);
2232
2233 if (r) {
2234 struct rb_thread_sched *sched = &r->threads.sched;
2235
2236 thread_sched_lock(sched, NULL);
2237 {
2238 rb_thread_t *next_th = sched->running;
2239
2240 if (next_th && next_th->nt == NULL) {
2241 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2242 thread_sched_switch0(nt->nt_context, next_th, nt, false);
2243 }
2244 else {
2245 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2246 }
2247 }
2248 thread_sched_unlock(sched, NULL);
2249 }
2250 else {
2251 // dequeue timed out: this NT exits and is deleted.
2252 break;
2253 }
2254
2255 if (nt->dedicated) {
2256 // SNT becomes DNT while running
2257 break;
2258 }
2259 }
2260 }
2261
2262 return NULL;
2263}
2264
2265static int native_thread_create_shared(rb_thread_t *th);
2266
2267#if USE_MN_THREADS
2268static void nt_free_stack(void *mstack);
2269#endif
2270
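/* With M:N threads, a dying thread whose machine stack is still in use cannot
 * be freed immediately: it is parked on vm->ractor.sched.zombie_threads and
 * kept alive until it is flagged as finished (see
 * rb_thread_sched_mark_zombies below). */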
2271void
2272rb_threadptr_remove(rb_thread_t *th)
2273{
2274#if USE_MN_THREADS
2275 if (th->sched.malloc_stack) {
2276 // dedicated
2277 return;
2278 }
2279 else {
2280 rb_vm_t *vm = th->vm;
2281 th->sched.finished = false;
2282
2283 RB_VM_LOCK_ENTER();
2284 {
2285 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2286 }
2287 RB_VM_LOCK_LEAVE();
2288 }
2289#endif
2290}
2291
2292void
2293rb_threadptr_sched_free(rb_thread_t *th)
2294{
2295#if USE_MN_THREADS
2296 if (th->sched.malloc_stack) {
2297 // has dedicated
2298 ruby_xfree(th->sched.context_stack);
2299 native_thread_destroy(th->nt);
2300 }
2301 else {
2302 nt_free_stack(th->sched.context_stack);
2303 // TODO: how to free nt and nt->altstack?
2304 }
2305
2306 ruby_xfree(th->sched.context);
2307 th->sched.context = NULL;
2308 // VM_ASSERT(th->sched.context == NULL);
2309#else
2310 ruby_xfree(th->sched.context_stack);
2311 native_thread_destroy(th->nt);
2312#endif
2313
2314 th->nt = NULL;
2315}
2316
2317void
2318rb_thread_sched_mark_zombies(rb_vm_t *vm)
2319{
2320 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2321 rb_thread_t *zombie_th, *next_zombie_th;
2322 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2323 if (zombie_th->sched.finished) {
2324 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2325 }
2326 else {
2327 rb_gc_mark(zombie_th->self);
2328 }
2329 }
2330 }
2331}
2332
2333static int
2334native_thread_create(rb_thread_t *th)
2335{
2336 VM_ASSERT(th->nt == 0);
2337 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2338 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);
2339
2340 if (!th->ractor->threads.sched.enable_mn_threads) {
2341 th->has_dedicated_nt = 1;
2342 }
2343
2344 if (th->has_dedicated_nt) {
2345 return native_thread_create_dedicated(th);
2346 }
2347 else {
2348 return native_thread_create_shared(th);
2349 }
2350}
2351
2352#if USE_NATIVE_THREAD_PRIORITY
2353
2354static void
2355native_thread_apply_priority(rb_thread_t *th)
2356{
2357#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2358 struct sched_param sp;
2359 int policy;
2360 int priority = 0 - th->priority;
2361 int max, min;
2362 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2363 max = sched_get_priority_max(policy);
2364 min = sched_get_priority_min(policy);
2365
2366 if (min > priority) {
2367 priority = min;
2368 }
2369 else if (max < priority) {
2370 priority = max;
2371 }
2372
2373 sp.sched_priority = priority;
2374 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2375#else
2376 /* not touched */
2377#endif
2378}
2379
2380#endif /* USE_NATIVE_THREAD_PRIORITY */
2381
2382static int
2383native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
2384{
2385 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2386}
2387
2388static void
2389ubf_pthread_cond_signal(void *ptr)
2390{
2391 rb_thread_t *th = (rb_thread_t *)ptr;
2392 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2393 rb_native_cond_signal(&th->nt->cond.intr);
2394}
2395
2396static void
2397native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2398{
2399 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2400 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2401
2402 /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
2403 * current_time + 100,000,000 seconds, so cap the timeout at that maximum.
2404 * Waking early because of the cap is treated as a kind of spurious wakeup;
2405 * the caller of native_sleep should handle spurious wakeups.
2406 *
2407 * See also [Bug #1341] [ruby-core:29702]
2408 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2409 */
2410 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2411
2412 THREAD_BLOCKING_BEGIN(th);
2413 {
2414 rb_native_mutex_lock(lock);
2415 th->unblock.func = ubf_pthread_cond_signal;
2416 th->unblock.arg = th;
2417
2418 if (RUBY_VM_INTERRUPTED(th->ec)) {
2419 /* interrupted. return immediately */
2420 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2421 }
2422 else {
2423 if (!rel) {
2424 rb_native_cond_wait(cond, lock);
2425 }
2426 else {
2427 rb_hrtime_t end;
2428
2429 if (*rel > max) {
2430 *rel = max;
2431 }
2432
2433 end = native_cond_timeout(cond, *rel);
2434 native_cond_timedwait(cond, lock, &end);
2435 }
2436 }
2437 th->unblock.func = 0;
2438
2439 rb_native_mutex_unlock(lock);
2440 }
2441 THREAD_BLOCKING_END(th);
2442
2443 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
2444}
2445
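/* UBF (un-blocking function) list: threads blocked in interruptible waits
 * register themselves here so that the timer thread can signal them again
 * and again (ubf_wakeup_all_threads) until they wake up and unregister. */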
2446#ifdef USE_UBF_LIST
2447static CCAN_LIST_HEAD(ubf_list_head);
2448static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2449
2450static void
2451ubf_list_atfork(void)
2452{
2453 ccan_list_head_init(&ubf_list_head);
2454 rb_native_mutex_initialize(&ubf_list_lock);
2455}
2456
2457RBIMPL_ATTR_MAYBE_UNUSED()
2458static bool
2459ubf_list_contain_p(rb_thread_t *th)
2460{
2461 rb_thread_t *list_th;
2462 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2463 if (list_th == th) return true;
2464 }
2465 return false;
2466}
2467
2468/* The thread 'th' is registered as one that we are trying to unblock. */
2469static void
2470register_ubf_list(rb_thread_t *th)
2471{
2472 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2473 struct ccan_list_node *node = &th->sched.node.ubf;
2474
2475 VM_ASSERT(th->unblock.func != NULL);
2476
2477 rb_native_mutex_lock(&ubf_list_lock);
2478 {
2479 // check that the node is not linked into the list yet
2480 if (ccan_list_empty((struct ccan_list_head*)node)) {
2481 VM_ASSERT(!ubf_list_contain_p(th));
2482 ccan_list_add(&ubf_list_head, node);
2483 }
2484 }
2485 rb_native_mutex_unlock(&ubf_list_lock);
2486
2487 timer_thread_wakeup();
2488}
2489
2490/* The thread 'th' is unblocked. It no longer needs to be registered. */
2491static void
2492unregister_ubf_list(rb_thread_t *th)
2493{
2494 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2495 struct ccan_list_node *node = &th->sched.node.ubf;
2496
2497 /* we can't allow re-entry into ubf_list_head */
2498 VM_ASSERT(th->unblock.func == NULL);
2499
2500 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2501 rb_native_mutex_lock(&ubf_list_lock);
2502 {
2503 VM_ASSERT(ubf_list_contain_p(th));
2504 ccan_list_del_init(node);
2505 }
2506 rb_native_mutex_unlock(&ubf_list_lock);
2507 }
2508}
2509
2510/*
2511 * Send a signal so that the target thread returns from a blocking syscall.
2512 * Almost any signal would do, but we chose SIGVTALRM.
2513 */
2514static void
2515ubf_wakeup_thread(rb_thread_t *th)
2516{
2517 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2518
2519 pthread_kill(th->nt->thread_id, SIGVTALRM);
2520}
2521
2522static void
2523ubf_select(void *ptr)
2524{
2525 rb_thread_t *th = (rb_thread_t *)ptr;
2526 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2527 ubf_wakeup_thread(th);
2528 register_ubf_list(th);
2529}
2530
2531static bool
2532ubf_threads_empty(void)
2533{
2534 return ccan_list_empty(&ubf_list_head) != 0;
2535}
2536
2537static void
2538ubf_wakeup_all_threads(void)
2539{
2540 if (!ubf_threads_empty()) {
2541 rb_thread_t *th;
2542 rb_native_mutex_lock(&ubf_list_lock);
2543 {
2544 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2545 ubf_wakeup_thread(th);
2546 }
2547 }
2548 rb_native_mutex_unlock(&ubf_list_lock);
2549 }
2550}
2551
2552#else /* USE_UBF_LIST */
2553#define register_ubf_list(th) (void)(th)
2554#define unregister_ubf_list(th) (void)(th)
2555#define ubf_select 0
2556static void ubf_wakeup_all_threads(void) { return; }
2557static bool ubf_threads_empty(void) { return true; }
2558#define ubf_list_atfork() do {} while (0)
2559#endif /* USE_UBF_LIST */
2560
2561#define TT_DEBUG 0
2562#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2563
2564void
2565rb_thread_wakeup_timer_thread(int sig)
2566{
2567 // This function can be called from signal handlers, so
2568 // pthread_mutex_lock() must not be used.
2569
2570 // wakeup timer thread
2571 timer_thread_wakeup_force();
2572
2573 // interrupt main thread if main thread is available
2574 if (system_working) {
2575 rb_vm_t *vm = GET_VM();
2576 rb_thread_t *main_th = vm->ractor.main_thread;
2577
2578 if (main_th) {
2579 volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);
2580
2581 if (main_th_ec) {
2582 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2583
2584 if (vm->ubf_async_safe && main_th->unblock.func) {
2585 (main_th->unblock.func)(main_th->unblock.arg);
2586 }
2587 }
2588 }
2589 }
2590}
2591
2592#define CLOSE_INVALIDATE_PAIR(expr) \
2593 close_invalidate_pair(expr,"close_invalidate: "#expr)
2594static void
2595close_invalidate(int *fdp, const char *msg)
2596{
2597 int fd = *fdp;
2598
2599 *fdp = -1;
2600 if (close(fd) < 0) {
2601 async_bug_fd(msg, errno, fd);
2602 }
2603}
2604
2605static void
2606close_invalidate_pair(int fds[2], const char *msg)
2607{
2608 if (USE_EVENTFD && fds[0] == fds[1]) {
2609 fds[1] = -1; // disable write port first
2610 close_invalidate(&fds[0], msg);
2611 }
2612 else {
2613 close_invalidate(&fds[1], msg);
2614 close_invalidate(&fds[0], msg);
2615 }
2616}
2617
2618static void
2619set_nonblock(int fd)
2620{
2621 int oflags;
2622 int err;
2623
2624 oflags = fcntl(fd, F_GETFL);
2625 if (oflags == -1)
2626 rb_sys_fail(0);
2627 oflags |= O_NONBLOCK;
2628 err = fcntl(fd, F_SETFL, oflags);
2629 if (err == -1)
2630 rb_sys_fail(0);
2631}
2632
2633/* communication pipe between signal handlers and the timer thread */
2634static void
2635setup_communication_pipe_internal(int pipes[2])
2636{
2637 int err;
2638
2639 if (pipes[0] > 0 || pipes[1] > 0) {
2640 VM_ASSERT(pipes[0] > 0);
2641 VM_ASSERT(pipes[1] > 0);
2642 return;
2643 }
2644
2645 /*
2646 * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which were
2647 * missing the EFD_* flags; those kernels fall back to a pipe.
2648 */
2649#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2650 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2651
2652 if (pipes[0] >= 0) {
2653 rb_update_max_fd(pipes[0]);
2654 return;
2655 }
2656#endif
2657
2658 err = rb_cloexec_pipe(pipes);
2659 if (err != 0) {
2660 rb_bug("can not create communication pipe");
2661 }
2662 rb_update_max_fd(pipes[0]);
2663 rb_update_max_fd(pipes[1]);
2664 set_nonblock(pipes[0]);
2665 set_nonblock(pipes[1]);
2666}
2667
2668#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2669# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2670#endif
2671
2672enum {
2673 THREAD_NAME_MAX =
2674#if defined(__linux__)
2675 16
2676#elif defined(__APPLE__)
2677/* Undocumented, and main thread seems unlimited */
2678 64
2679#else
2680 16
2681#endif
2682};
2683
2684static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2685
2686static void
2687native_set_thread_name(rb_thread_t *th)
2688{
2689#ifdef SET_CURRENT_THREAD_NAME
2690 VALUE loc;
2691 if (!NIL_P(loc = th->name)) {
2692 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2693 }
2694 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2695 char *name, *p;
2696 char buf[THREAD_NAME_MAX];
2697 size_t len;
2698 int n;
2699
2700 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
2701 p = strrchr(name, '/'); /* show only the basename of the path. */
2702 if (p && p[1])
2703 name = p + 1;
2704
2705 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
2706 RB_GC_GUARD(loc);
2707
2708 len = (size_t)n;
2709 if (len >= sizeof(buf)) {
2710 buf[sizeof(buf)-2] = '*';
2711 buf[sizeof(buf)-1] = '\0';
2712 }
2713 SET_CURRENT_THREAD_NAME(buf);
2714 }
2715#endif
2716}
2717
2718static void
2719native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2720{
2721#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2722 char buf[THREAD_NAME_MAX];
2723 const char *s = "";
2724# if !defined SET_ANOTHER_THREAD_NAME
2725 if (!pthread_equal(pthread_self(), thread_id)) return;
2726# endif
2727 if (!NIL_P(name)) {
2728 long n;
2729 RSTRING_GETMEM(name, s, n);
2730 if (n >= (int)sizeof(buf)) {
2731 memcpy(buf, s, sizeof(buf)-1);
2732 buf[sizeof(buf)-1] = '\0';
2733 s = buf;
2734 }
2735 }
2736# if defined SET_ANOTHER_THREAD_NAME
2737 SET_ANOTHER_THREAD_NAME(thread_id, s);
2738# elif defined SET_CURRENT_THREAD_NAME
2739 SET_CURRENT_THREAD_NAME(s);
2740# endif
2741#endif
2742}
2743
2744#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
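/* Returns the OS-level thread id: a gettid-style id where
 * RB_THREAD_T_HAS_NATIVE_ID is available, otherwise (on macOS) the value from
 * pthread_threadid_np(), falling back to pthread_mach_thread_np() on systems
 * that predate it. */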
2745static VALUE
2746native_thread_native_thread_id(rb_thread_t *target_th)
2747{
2748 if (!target_th->nt) return Qnil;
2749
2750#ifdef RB_THREAD_T_HAS_NATIVE_ID
2751 int tid = target_th->nt->tid;
2752 if (tid == 0) return Qnil;
2753 return INT2FIX(tid);
2754#elif defined(__APPLE__)
2755 uint64_t tid;
2756/* The first condition is needed because MAC_OS_X_VERSION_10_6
2757 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2758 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2759 so it has C++17 and everything needed to build modern Ruby. */
2760# if (!defined(MAC_OS_X_VERSION_10_6) || \
2761 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2762 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2763 const bool no_pthread_threadid_np = true;
2764# define NO_PTHREAD_MACH_THREAD_NP 1
2765# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2766 const bool no_pthread_threadid_np = false;
2767# else
2768# if !(defined(__has_attribute) && __has_attribute(availability))
2769 /* __API_AVAILABLE macro does nothing on gcc */
2770 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2771# endif
2772 /* Check weakly linked symbol */
2773 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2774# endif
2775 if (no_pthread_threadid_np) {
2776 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2777 }
2778# ifndef NO_PTHREAD_MACH_THREAD_NP
2779 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2780 if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
2781 return ULL2NUM((unsigned long long)tid);
2782# endif
2783#endif
2784}
2785# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2786#else
2787# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2788#endif
2789
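/* Singleton state of the timer thread: its pthread id, the communication fds
 * used to wake it up, the kernel event queue fd used for M:N threads, and the
 * list of threads waiting with a timeout, whose head is the nearest deadline. */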
2790static struct {
2791 rb_serial_t created_fork_gen;
2792 pthread_t pthread_id;
2793
2794 int comm_fds[2]; // r, w
2795
2796#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2797 int event_fd; // kernel event queue fd (epoll/kqueue)
2798#endif
2799#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2800#define EPOLL_EVENTS_MAX 0x10
2801 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2802#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2803#define KQUEUE_EVENTS_MAX 0x10
2804 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2805#endif
2806
2807 // waiting threads list
2808 struct ccan_list_head waiting; // waiting threads in ractors
2809 pthread_mutex_t waiting_lock;
2810} timer_th = {
2811 .created_fork_gen = 0,
2812};
2813
2814#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2815
2816static void timer_thread_check_timeslice(rb_vm_t *vm);
2817static int timer_thread_set_timeout(rb_vm_t *vm);
2818static void timer_thread_wakeup_thread(rb_thread_t *th);
2819
2820#include "thread_pthread_mn.c"
2821
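/* Recover the rb_thread_t that embeds 'w', container_of-style, by subtracting
 * the offset of the sched.waiting_reason member. */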
2822static rb_thread_t *
2823thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
2824{
2825 if (w) {
2826 return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
2827 }
2828 else {
2829 return NULL;
2830 }
2831}
2832
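/* Decide how long the timer thread may block, in milliseconds: 10ms whenever
 * a time slice, a periodic UBF wakeup, or a lazy GRQ dequeue is needed;
 * otherwise the nearest sleeping thread's deadline; or -1 (block
 * indefinitely) when there is no reason to wake up at all. */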
2833static int
2834timer_thread_set_timeout(rb_vm_t *vm)
2835{
2836#if 0
2837 return 10; // ms
2838#else
2839 int timeout = -1;
2840
2841 ractor_sched_lock(vm, NULL);
2842 {
2843 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
2844 || !ubf_threads_empty() // (1-3) Periodic UBF
2845 || vm->ractor.sched.grq_cnt > 0 // (1-4) Lazy GRQ deq start
2846 ) {
2847
2848 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2849 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2850 !ubf_threads_empty(),
2851 (vm->ractor.sched.grq_cnt > 0));
2852
2853 timeout = 10; // ms
2854 vm->ractor.sched.timeslice_wait_inf = false;
2855 }
2856 else {
2857 vm->ractor.sched.timeslice_wait_inf = true;
2858 }
2859 }
2860 ractor_sched_unlock(vm, NULL);
2861
2862 // Always check waiting threads to find minimum timeout
2863 // even when scheduler has work (grq_cnt > 0)
2864 rb_native_mutex_lock(&timer_th.waiting_lock);
2865 {
2866 struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
2867 rb_thread_t *th = thread_sched_waiting_thread(w);
2868
2869 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2870 rb_hrtime_t now = rb_hrtime_now();
2871 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2872
2873 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2874
2875 // TODO: overflow?
2876 int thread_timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
2877
2878 // Use minimum of scheduler timeout and thread sleep timeout
2879 if (timeout < 0 || thread_timeout < timeout) {
2880 timeout = thread_timeout;
2881 }
2882 }
2883 }
2884 rb_native_mutex_unlock(&timer_th.waiting_lock);
2885
2886 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2887
2888 // fprintf(stderr, "timeout:%d\n", timeout);
2889 return timeout;
2890#endif
2891}
2892
2893static void
2894timer_thread_check_signal(rb_vm_t *vm)
2895{
2896 // ruby_sigchld_handler(vm); TODO
2897
2898 int signum = rb_signal_buff_size();
2899 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2900 RUBY_DEBUG_LOG("signum:%d", signum);
2901 threadptr_trap_interrupt(vm->ractor.main_thread);
2902 }
2903}
2904
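// Returns true if the deadline 'abs' has already passed, or lies within 1ms
// of 'now' (too soon to be worth sleeping for again).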
2905static bool
2906timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2907{
2908 if (abs < now) {
2909 return true;
2910 }
2911 else if (abs - now < RB_HRTIME_PER_MSEC) {
2912 return true; // too short time
2913 }
2914 else {
2915 return false;
2916 }
2917}
2918
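/* Pop the earliest waiter from timer_th.waiting if its timeout has expired:
 * unlink it, clear its waiting flags, set a zero result, and return the
 * owning thread. Returns NULL when no deadline has expired yet. */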
2919static rb_thread_t *
2920timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2921{
2922 struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
2923
2924 if (w != NULL &&
2925 (w->flags & thread_sched_waiting_timeout) &&
2926 timer_thread_check_exceed(w->data.timeout, now)) {
2927
2928 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
2929
2930 // delete from waiting list
2931 ccan_list_del_init(&w->node);
2932
2933 // setup result
2934 w->flags = thread_sched_waiting_none;
2935 w->data.result = 0;
2936
2937 return thread_sched_waiting_thread(w);
2938 }
2939
2940 return NULL;
2941}
2942
2943static void
2944timer_thread_wakeup_thread(rb_thread_t *th)
2945{
2946 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2947 struct rb_thread_sched *sched = TH_SCHED(th);
2948
2949 thread_sched_lock(sched, th);
2950 {
2951 if (sched->running != th) {
2952 thread_sched_to_ready_common(sched, th, true, false);
2953 }
2954 else {
2955 // it will release the execution right by itself
2956 }
2957 }
2958 thread_sched_unlock(sched, th);
2959}
2960
2961static void
2962timer_thread_check_timeout(rb_vm_t *vm)
2963{
2964 rb_hrtime_t now = rb_hrtime_now();
2965 rb_thread_t *th;
2966
2967 rb_native_mutex_lock(&timer_th.waiting_lock);
2968 {
2969 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
2970 timer_thread_wakeup_thread(th);
2971 }
2972 }
2973 rb_native_mutex_unlock(&timer_th.waiting_lock);
2974}
2975
2976static void
2977timer_thread_check_timeslice(rb_vm_t *vm)
2978{
2979 // TODO: check time
2980 rb_thread_t *th;
2981 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
2982 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
2983 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
2984 }
2985}
2986
2987void
2988rb_assert_sig(void)
2989{
2990 sigset_t oldmask;
2991 pthread_sigmask(0, NULL, &oldmask);
2992 if (sigismember(&oldmask, SIGVTALRM)) {
2993 rb_bug("!!!");
2994 }
2995 else {
2996 RUBY_DEBUG_LOG("ok");
2997 }
2998}
2999
3000static void *
3001timer_thread_func(void *ptr)
3002{
3003 rb_vm_t *vm = (rb_vm_t *)ptr;
3004#if defined(RUBY_NT_SERIAL)
3005 ruby_nt_serial = (rb_atomic_t)-1;
3006#endif
3007
3008 RUBY_DEBUG_LOG("started%s", "");
3009
3010 while (system_working) {
3011 timer_thread_check_signal(vm);
3012 timer_thread_check_timeout(vm);
3013 ubf_wakeup_all_threads();
3014
3015 RUBY_DEBUG_LOG("system_working:%d", system_working);
3016 timer_thread_polling(vm);
3017 }
3018
3019 RUBY_DEBUG_LOG("terminated");
3020 return NULL;
3021}
3022
3023/* only use signal-safe system calls here */
3024static void
3025signal_communication_pipe(int fd)
3026{
3027#if USE_EVENTFD
3028 const uint64_t buff = 1;
3029#else
3030 const char buff = '!';
3031#endif
3032 ssize_t result;
3033
3034 /* already opened */
3035 if (fd >= 0) {
3036 retry:
3037 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3038 int e = errno;
3039 switch (e) {
3040 case EINTR: goto retry;
3041 case EAGAIN:
3042#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3043 case EWOULDBLOCK:
3044#endif
3045 break;
3046 default:
3047 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3048 }
3049 }
3050 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3051 }
3052 else {
3053 // ignore wakeup
3054 }
3055}
3056
3057static void
3058timer_thread_wakeup_force(void)
3059{
3060 // must not use RUBY_DEBUG_LOG() because this function can be called from signal handlers.
3061 signal_communication_pipe(timer_th.comm_fds[1]);
3062}
3063
3064static void
3065timer_thread_wakeup_locked(rb_vm_t *vm)
3066{
3067 // the caller must already hold the ractor scheduler lock.
3068 ASSERT_ractor_sched_locked(vm, NULL);
3069
3070 if (timer_th.created_fork_gen == current_fork_gen) {
3071 if (vm->ractor.sched.timeslice_wait_inf) {
3072 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3073 timer_thread_wakeup_force();
3074 }
3075 else {
3076 RUBY_DEBUG_LOG("will be wakeup...");
3077 }
3078 }
3079}
3080
3081static void
3082timer_thread_wakeup(void)
3083{
3084 rb_vm_t *vm = GET_VM();
3085
3086 ractor_sched_lock(vm, NULL);
3087 {
3088 timer_thread_wakeup_locked(vm);
3089 }
3090 ractor_sched_unlock(vm, NULL);
3091}
3092
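/* Create the timer thread, or recreate it after fork(): a forked child first
 * closes the fds inherited from the parent and reinitializes the waiting list
 * and its lock before spawning a fresh timer thread. */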
3093static void
3094rb_thread_create_timer_thread(void)
3095{
3096 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3097
3098 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3099
3100 timer_th.created_fork_gen = current_fork_gen;
3101
3102 if (created_fork_gen != current_fork_gen) {
3103 if (created_fork_gen != 0) {
3104 RUBY_DEBUG_LOG("forked child process");
3105
3106 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3107#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3108 close_invalidate(&timer_th.event_fd, "close event_fd");
3109#endif
3110 rb_native_mutex_destroy(&timer_th.waiting_lock);
3111 }
3112
3113 ccan_list_head_init(&timer_th.waiting);
3114 rb_native_mutex_initialize(&timer_th.waiting_lock);
3115
3116 // open communication channel
3117 setup_communication_pipe_internal(timer_th.comm_fds);
3118
3119 // open event fd
3120 timer_thread_setup_mn();
3121 }
3122
3123 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3124}
3125
3126static int
3127native_stop_timer_thread(void)
3128{
3129 int stopped;
3130 stopped = --system_working <= 0;
3131
3132 if (stopped) {
3133 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3134 timer_thread_wakeup_force();
3135 RUBY_DEBUG_LOG("wakeup sent");
3136 pthread_join(timer_th.pthread_id, NULL);
3137 }
3138
3139 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3140 return stopped;
3141}
3142
3143static void
3144native_reset_timer_thread(void)
3145{
3146 //
3147}
3148
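/* Decide whether 'addr' (typically the fault address seen by a SEGV handler
 * on the alternative signal stack) lies within the watermark zone at the end
 * of the machine stack, i.e. whether the fault is a stack overflow. */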
3149#ifdef HAVE_SIGALTSTACK
3150int
3151ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3152{
3153 void *base;
3154 size_t size;
3155 const size_t water_mark = 1024 * 1024;
3156 STACK_GROW_DIR_DETECTION;
3157
3158#ifdef STACKADDR_AVAILABLE
3159 if (get_stack(&base, &size) == 0) {
3160# ifdef __APPLE__
3161 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3162 struct rlimit rlim;
3163 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3164 size = (size_t)rlim.rlim_cur;
3165 }
3166 }
3167# endif
3168 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3169 }
3170 else
3171#endif
3172 if (th) {
3173 size = th->ec->machine.stack_maxsize;
3174 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3175 }
3176 else {
3177 return 0;
3178 }
3179 size /= RUBY_STACK_SPACE_RATIO;
3180 if (size > water_mark) size = water_mark;
3181 if (IS_STACK_DIR_UPPER()) {
3182 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3183 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3184 }
3185 else {
3186 if (size > (size_t)base) size = (size_t)base;
3187 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3188 }
3189 return 0;
3190}
3191#endif
3192
3193int
3194rb_reserved_fd_p(int fd)
3195{
3196 /* no false positives even if we ran out of fds at startup */
3197 if (fd < 0) return 0;
3198
3199 if (fd == timer_th.comm_fds[0] ||
3200 fd == timer_th.comm_fds[1]
3201#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3202 || fd == timer_th.event_fd
3203#endif
3204 ) {
3205 goto check_fork_gen;
3206 }
3207 return 0;
3208
3209 check_fork_gen:
3210 if (timer_th.created_fork_gen == current_fork_gen) {
3211 /* async-signal-safe */
3212 return 1;
3213 }
3214 else {
3215 return 0;
3216 }
3217}
3218
3219rb_nativethread_id_t
3220rb_nativethread_self(void)
3221{
3222 return pthread_self();
3223}
3224
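/* Emulate ppoll(2) with poll(2) on platforms that lack it: convert the
 * timespec to milliseconds, rounding nanoseconds up and saturating at
 * INT_MAX. The sigmask argument is ignored, as the TODO below notes. */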
3225#if defined(USE_POLL) && !defined(HAVE_PPOLL)
3226/* TODO: don't ignore sigmask */
3227static int
3228ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3229 const struct timespec *ts, const sigset_t *sigmask)
3230{
3231 int timeout_ms;
3232
3233 if (ts) {
3234 int tmp, tmp2;
3235
3236 if (ts->tv_sec > INT_MAX/1000)
3237 timeout_ms = INT_MAX;
3238 else {
3239 tmp = (int)(ts->tv_sec * 1000);
3240 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3241 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3242 if (INT_MAX - tmp < tmp2)
3243 timeout_ms = INT_MAX;
3244 else
3245 timeout_ms = (int)(tmp + tmp2);
3246 }
3247 }
3248 else
3249 timeout_ms = -1;
3250
3251 return poll(fds, nfds, timeout_ms);
3252}
3253# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3254#endif
3255
3256/*
3257 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3258 * since threads may be too starved to enter the GVL waitqueue for
3259 * us to detect contention. Instead, we want to kick other threads
3260 * so they can run and possibly prevent us from entering slow paths
3261 * in ppoll() or similar syscalls.
3262 *
3263 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3264 * [ruby-core:90417] [Bug #15398]
3265 */
3266#define THREAD_BLOCKING_YIELD(th) do { \
3267 const rb_thread_t *next_th; \
3268 struct rb_thread_sched *sched = TH_SCHED(th); \
3269 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3270 thread_sched_to_waiting(sched, (th)); \
3271 next_th = sched->running; \
3272 rb_native_mutex_unlock(&sched->lock_); \
3273 native_thread_yield(); /* TODO: needed? */ \
3274 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3275 native_thread_yield(); \
3276 }
3277
3278static void
3279native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
3280{
3281 struct rb_thread_sched *sched = TH_SCHED(th);
3282
3283 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3284 if (rel) {
3285 if (th_has_dedicated_nt(th)) {
3286 native_cond_sleep(th, rel);
3287 }
3288 else {
3289 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3290 }
3291 }
3292 else {
3293 thread_sched_to_waiting_until_wakeup(sched, th);
3294 }
3295
3296 RUBY_DEBUG_LOG("wakeup");
3297}
3298
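/* Fork protection: rb_thread_prevent_fork() runs 'func' under the read lock,
 * so any number of fork-sensitive operations may proceed concurrently, while
 * the forking thread takes the write lock (rb_thread_acquire_fork_lock) and
 * therefore waits until all of them have finished. */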
3299// fork read-write lock (only for pthread)
3300static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3301
3302void
3303rb_thread_release_fork_lock(void)
3304{
3305 int r;
3306 if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
3307 rb_bug_errno("pthread_rwlock_unlock", r);
3308 }
3309}
3310
3311void
3312rb_thread_reset_fork_lock(void)
3313{
3314 int r;
3315 if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
3316 rb_bug_errno("pthread_rwlock_destroy", r);
3317 }
3318
3319 if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
3320 rb_bug_errno("pthread_rwlock_init", r);
3321 }
3322}
3323
3324void *
3325rb_thread_prevent_fork(void *(*func)(void *), void *data)
3326{
3327 int r;
3328 if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
3329 rb_bug_errno("pthread_rwlock_rdlock", r);
3330 }
3331 void *result = func(data);
3332 rb_thread_release_fork_lock();
3333 return result;
3334}
3335
3336void
3337rb_thread_acquire_fork_lock(void)
3338{
3339 int r;
3340 if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
3341 rb_bug_errno("pthread_rwlock_wrlock", r);
3342 }
3343}
3344
3345// thread internal event hooks (only for pthread)
3346
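/* Internal thread event hooks are kept in a singly linked list guarded by an
 * rwlock: callbacks run under the read lock (rb_thread_execute_hooks), while
 * hook registration and removal take the write lock. */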
3347struct rb_internal_thread_event_hook {
3348 rb_internal_thread_event_callback callback;
3349 rb_event_flag_t event;
3350 void *user_data;
3351
3352 struct rb_internal_thread_event_hook *next;
3353};
3354
3355static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3356
3357rb_internal_thread_event_hook_t *
3358rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
3359{
3360 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3361 hook->callback = callback;
3362 hook->user_data = user_data;
3363 hook->event = internal_event;
3364
3365 int r;
3366 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3367 rb_bug_errno("pthread_rwlock_wrlock", r);
3368 }
3369
3370 hook->next = rb_internal_thread_event_hooks;
3371 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3372
3373 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3374 rb_bug_errno("pthread_rwlock_unlock", r);
3375 }
3376 return hook;
3377}
3378
3379bool
3380rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
3381{
3382 int r;
3383 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3384 rb_bug_errno("pthread_rwlock_wrlock", r);
3385 }
3386
3387 bool success = FALSE;
3388
3389 if (rb_internal_thread_event_hooks == hook) {
3390 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3391 success = TRUE;
3392 }
3393 else {
3394 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3395
3396 do {
3397 if (h->next == hook) {
3398 h->next = hook->next;
3399 success = TRUE;
3400 break;
3401 }
3402 } while ((h = h->next));
3403 }
3404
3405 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3406 rb_bug_errno("pthread_rwlock_unlock", r);
3407 }
3408
3409 if (success) {
3410 ruby_xfree(hook);
3411 }
3412 return success;
3413}
3414
3415static void
3416rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
3417{
3418 int r;
3419 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3420 rb_bug_errno("pthread_rwlock_rdlock", r);
3421 }
3422
3423 if (rb_internal_thread_event_hooks) {
3424 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3425 do {
3426 if (h->event & event) {
3427 rb_internal_thread_event_data_t event_data = {
3428 .thread = th->self,
3429 };
3430 (*h->callback)(event, &event_data, h->user_data);
3431 }
3432 } while((h = h->next));
3433 }
3434 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3435 rb_bug_errno("pthread_rwlock_unlock", r);
3436 }
3437}
3438
3439// Returns true if the current thread newly acquires a DNT.
3440// Returns false if the current thread already has a DNT.
3441bool
3442rb_thread_lock_native_thread(void)
3443{
3444 rb_thread_t *th = GET_THREAD();
3445 bool is_snt = th->nt->dedicated == 0;
3446 native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
3447
3448 return is_snt;
3449}
3450
3451#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */