#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/gc.h"
#include "internal/sanitizers.h"
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if defined(HAVE_FCNTL_H)
#include <fcntl.h>
#elif defined(HAVE_SYS_FCNTL_H)
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#ifdef __linux__
#include <sys/syscall.h> /* for SYS_gettid */
#endif
#ifdef __APPLE__
# include <AvailabilityMacros.h>
#endif
#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
#else
# define USE_EVENTFD (0)
#endif
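/* Prefer a CLOCK_MONOTONIC-based condattr when available so that
 * condition-variable timeouts are not affected by wall-clock changes;
 * otherwise condattr_monotonic stays NULL and the default (realtime)
 * clock is used. */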
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
#else
static const void *const condattr_monotonic = NULL;
#endif

#ifndef HAVE_SYS_EVENT_H
#define HAVE_SYS_EVENT_H 0
#endif

#ifndef HAVE_SYS_EPOLL_H
#define HAVE_SYS_EPOLL_H 0
#endif
#ifndef USE_MN_THREADS
  #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
    // Emscripten declares the epoll API but does not implement it, and with
    // pthread-based coroutines M:N scheduling is not worth using.
    #define USE_MN_THREADS 0
  #elif HAVE_SYS_EPOLL_H
    #include <sys/epoll.h>
    #define USE_MN_THREADS 1
  #elif HAVE_SYS_EVENT_H
    #include <sys/event.h>
    #define USE_MN_THREADS 1
  #else
    #define USE_MN_THREADS 0
  #endif
#endif
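/* Thin wrappers around the pthread mutex/cond primitives.  They abort
 * via rb_bug_errno() on any unexpected error, and can trace every
 * lock/unlock when NATIVE_MUTEX_LOCK_DEBUG is set to 1. */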
#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, void *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}
void
rb_native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

void
rb_native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

void
rb_native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

void
rb_native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_init(cond, condattr_monotonic);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_destroy(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}
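/* Retry loops below: some systems (notably older macOS) can return
 * EAGAIN from pthread_cond_signal()/pthread_cond_broadcast(), so both
 * are retried until they stop reporting EAGAIN. */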
void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}
void
rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
{
    int r;
    struct timespec ts;

    do {
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}
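/* Convert a relative timeout into the absolute deadline expected by
 * native_cond_timedwait(): based on the monotonic clock when
 * condattr_monotonic is in use, otherwise on the realtime clock. */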
static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);
    }
    else {
        struct timespec ts;

        rb_timespec_now(&ts);
        return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
    }
}

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
{
    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);
}
static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
#if 0
#define RB_INTERNAL_THREAD_HOOK(event, th) \
    if (UNLIKELY(rb_internal_thread_event_hooks)) { \
        fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
        rb_thread_execute_hooks(event, th); \
    }
#else
#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
#endif
static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */

#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
# define USE_UBF_LIST 1
#endif

static void threadptr_trap_interrupt(rb_thread_t *);

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif
static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);

static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
static void timer_thread_wakeup(void);
static void timer_thread_wakeup_locked(rb_vm_t *vm);
static void timer_thread_wakeup_force(void);
static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
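/* Debug-only dumpers for the per-scheduler ready queue and the global
 * ractor run queue; both cap their output and rb_bug() if the lists
 * look implausibly long. */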
#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)

static bool
th_has_dedicated_nt(const rb_thread_t *th)
{
    return th->nt->dedicated > 0;
}
RBIMPL_ATTR_MAYBE_UNUSED()
static void
thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
{
    fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
    rb_thread_t *th;
    int i = 0;
    ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
        i++;
        if (i>10) rb_bug("too many");
        fprintf(stderr, "  ready:%d (%sNT:%d)\n", th->serial,
                th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
                th->nt ? (int)th->nt->serial : -1);
    }
}
#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)

RBIMPL_ATTR_MAYBE_UNUSED()
static void
ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
{
    rb_ractor_t *r;

    fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);

    int i = 0;
    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        i++;
        if (i>10) rb_bug("!!");
        fprintf(stderr, "  %d ready:%d\n", i, rb_ractor_id(r));
    }
}
#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)

static void
thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    rb_native_mutex_lock(&sched->lock_);

#if VM_CHECK_MODE
    RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
    VM_ASSERT(sched->lock_owner == NULL);
    sched->lock_owner = th;
#else
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
#endif
}
static void
thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));

#if VM_CHECK_MODE
    VM_ASSERT(sched->lock_owner == th);
    sched->lock_owner = NULL;
#endif

    rb_native_mutex_unlock(&sched->lock_);
}
static void
thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

#if VM_CHECK_MODE > 0
    sched->lock_owner = th;
#endif
}

static void
ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
{
#if VM_CHECK_MODE
    if (th) {
        VM_ASSERT(sched->lock_owner == th);
    }
    else {
        VM_ASSERT(sched->lock_owner != NULL);
    }
#endif
}
#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)

static unsigned int
rb_ractor_serial(const rb_ractor_t *r)
{
    if (r) {
        return rb_ractor_id(r);
    }
    else {
        return 0;
    }
}
static void
ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
    VM_ASSERT(vm->ractor.sched.locked == false);

    vm->ractor.sched.lock_owner = cr;
    vm->ractor.sched.locked = true;
#endif
}

static void
ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(vm->ractor.sched.lock_owner == cr);

    vm->ractor.sched.locked = false;
    vm->ractor.sched.lock_owner = NULL;
#endif
}
static void
ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    rb_native_mutex_lock(&vm->ractor.sched.lock);

#if VM_CHECK_MODE
    RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
#else
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
#endif

    ractor_sched_set_locked(vm, cr);
}

static void
ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));

    ractor_sched_set_unlocked(vm, cr);
    rb_native_mutex_unlock(&vm->ractor.sched.lock);
}
static void
ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
}
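/* Linear scans over the global running/timeslice thread lists.  They
 * are used from VM_ASSERT()s only, so correctness matters more than
 * speed. */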
static bool
ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
{
    rb_thread_t *rth;
    ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
        if (rth == th) return true;
    }
    return false;
}

static unsigned int
ractor_sched_running_threads_size(rb_vm_t *vm)
{
    rb_thread_t *th;
    unsigned int i = 0;
    ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
        i++;
    }
    return i;
}

static unsigned int
ractor_sched_timeslice_threads_size(rb_vm_t *vm)
{
    rb_thread_t *th;
    unsigned int i = 0;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
        i++;
    }
    return i;
}

static bool
ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
{
    rb_thread_t *rth;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
        if (rth == th) return true;
    }
    return false;
}
static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
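/* Maintain the global lists of running threads and of threads that
 * need timer-thread time slices, while adding (add_th) and/or removing
 * (del_th) a thread.  Expects the per-scheduler lock to be held and
 * takes the global ractor-scheduler lock itself; it also cooperates
 * with any ractor barrier in progress. */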
static void
thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
                                   rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
{
#if USE_RUBY_DEBUG_LOG
    unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
#endif

    rb_thread_t *del_timeslice_th;

    if (del_th && sched->is_running_timeslice) {
        del_timeslice_th = del_th;
        sched->is_running_timeslice = false;
    }
    else {
        del_timeslice_th = NULL;
    }

    RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
                   rb_th_serial(add_th), rb_th_serial(del_th),
                   rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));

    ractor_sched_lock(vm, cr);
    {
        // del
        if (del_th) {
            VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
            VM_ASSERT(del_timeslice_th != NULL ||
                      !ractor_sched_timeslice_threads_contain_p(vm, del_th));

            ccan_list_del_init(&del_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt--;

            if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                ractor_sched_barrier_join_signal_locked(vm);
            }
            sched->is_running = false;
        }

        // add
        if (add_th) {
            while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                RUBY_DEBUG_LOG("barrier-wait");

                ractor_sched_barrier_join_signal_locked(vm);
                ractor_sched_barrier_join_wait_locked(vm, add_th);
            }

            VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));

            ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt++;
            sched->is_running = true;
            VM_ASSERT(!vm->ractor.sched.barrier_waiting);
        }

        if (add_timeslice_th) {
            // update timeslice threads
            int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
            ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
            sched->is_running_timeslice = true;
            if (was_empty) {
                timer_thread_wakeup_locked(vm);
            }
        }

        if (del_timeslice_th) {
            VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
            ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
        }

        VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
        VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
    }
    ractor_sched_unlock(vm, cr);

    if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
        // it can be after barrier synchronization by another ractor
        rb_thread_t *lock_owner = NULL;
#if VM_CHECK_MODE
        lock_owner = sched->lock_owner;
#endif
        thread_sched_unlock(sched, lock_owner);
        {
            RB_VM_LOCK_ENTER();
            RB_VM_LOCK_LEAVE();
        }
        thread_sched_lock(sched, lock_owner);
    }

#if USE_RUBY_DEBUG_LOG
    RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
#endif
}
static void
thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(sched->running == th);

    rb_vm_t *vm = th->vm;
    thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
}

static void
thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ASSERT_thread_sched_locked(sched, th);

    rb_vm_t *vm = th->vm;
    thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
}

void
rb_add_running_thread(rb_thread_t *th)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        thread_sched_add_running_thread(sched, th);
    }
    thread_sched_unlock(sched, th);
}

void
rb_del_running_thread(rb_thread_t *th)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        thread_sched_del_running_thread(sched, th);
    }
    thread_sched_unlock(sched, th);
}
// setup the current or next running thread.
// sched->running should be set only by this function.
static void
thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
    VM_ASSERT(sched->running != th);

    sched->running = th;
}

static bool
thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
{
    rb_thread_t *rth;
    ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
        if (rth == th) return true;
    }
    return false;
}
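/* Ready-queue management: thread_sched_deq() pops the next ready
 * thread (NULL if the queue is empty) and thread_sched_enq() appends
 * one, keeping readyq_cnt in sync. */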
static rb_thread_t *
thread_sched_deq(struct rb_thread_sched *sched)
{
    ASSERT_thread_sched_locked(sched, NULL);
    rb_thread_t *next_th;

    VM_ASSERT(sched->running != NULL);

    if (ccan_list_empty(&sched->readyq)) {
        next_th = NULL;
    }
    else {
        next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);

        VM_ASSERT(sched->readyq_cnt > 0);
        sched->readyq_cnt--;
        ccan_list_node_init(&next_th->sched.node.readyq);
    }

    RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);

    return next_th;
}
static void
thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
{
    ASSERT_thread_sched_locked(sched, NULL);
    RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));

    if (sched->is_running) {
        if (ccan_list_empty(&sched->readyq)) {
            // add sched->running to the timeslice threads
            thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
        }
    }
    else {
        VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
    }

    ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
    sched->readyq_cnt++;
}
static void
thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
{
    ASSERT_thread_sched_locked(sched, NULL);
    VM_ASSERT(sched->running == next_th);

    if (next_th) {
        if (next_th->nt) {
            if (th_has_dedicated_nt(next_th)) {
                RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
                rb_native_cond_signal(&next_th->nt->cond.readyq);
            }
            else {
                RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
            }
        }
        else {
            if (will_switch) {
                RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
            }
            else {
                RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
                ractor_sched_enq(next_th->vm, next_th->ractor);
            }
        }
    }
    else {
        RUBY_DEBUG_LOG("no waiting threads%s", "");
    }
}
// -> ready
static void
thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
{
    RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);

    VM_ASSERT(sched->running != th);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));

    if (sched->running == NULL) {
        thread_sched_set_running(sched, th);
        if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
    }
    else {
        thread_sched_enq(sched, th);
    }
}
static void
thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
    {
        thread_sched_to_ready_common(sched, th, true, false);
    }
    thread_sched_unlock(sched, th);
}
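/* Block until th is chosen as sched->running.  A thread with a
 * dedicated native thread sleeps on its own condvar; an M:N thread
 * either transfers directly to the next runnable thread's coroutine or
 * returns its native thread to the scheduler and waits to be resumed. */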
static void
thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));

    if (th != sched->running) {
        // wait for the execution right
        rb_thread_t *next_th;
        while((next_th = sched->running) != th) {
            if (th_has_dedicated_nt(th)) {
                RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));

                thread_sched_set_lock_owner(sched, NULL);
                {
                    RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
                    rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
                }
                thread_sched_set_lock_owner(sched, th);

                RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
                if (th == sched->running) {
                    rb_ractor_thread_switch(th->ractor, th);
                }
            }
            else {
                // search another ready thread
                if (can_direct_transfer &&
                    (next_th = sched->running) != NULL &&
                    !next_th->nt // next_th already has a native thread
                    ) {
                    RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    {
                        rb_ractor_set_current_ec(th->ractor, NULL);
                        thread_sched_switch(th, next_th);
                    }
                    thread_sched_set_lock_owner(sched, th);
                }
                else {
                    // search another ready ractor
                    struct rb_native_thread *nt = th->nt;
                    native_thread_assign(NULL, th);

                    RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    {
                        rb_ractor_set_current_ec(th->ractor, NULL);
                        coroutine_transfer0(th->sched.context, nt->nt_context, false);
                    }
                    thread_sched_set_lock_owner(sched, th);
                }

                VM_ASSERT(rb_current_ec_noinline() == th->ec);
            }
        }

        VM_ASSERT(th->nt != NULL);
        VM_ASSERT(rb_current_ec_noinline() == th->ec);
        VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

        // add th to the running threads
        thread_sched_add_running_thread(sched, th);
    }
}
// waiting -> ready -> running (locked)
static void
thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));

    VM_ASSERT(sched->running != th);
    VM_ASSERT(th_has_dedicated_nt(th));
    VM_ASSERT(GET_THREAD() == th);

    native_thread_dedicated_dec(th->vm, th->ractor, th->nt);

    // waiting -> ready
    thread_sched_to_ready_common(sched, th, false, false);

    if (sched->running == th) {
        thread_sched_add_running_thread(sched, th);
    }

    thread_sched_wait_running_turn(sched, th, false);
}

// waiting -> ready -> running
static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_running_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}
// resume the next thread in the ready queue
static void
thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
{
    ASSERT_thread_sched_locked(sched, th);

    VM_ASSERT(sched->running == th);
    VM_ASSERT(sched->running->nt != NULL);

    rb_thread_t *next_th = thread_sched_deq(sched);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
    VM_ASSERT(th != next_th);

    thread_sched_set_running(sched, next_th);
    VM_ASSERT(next_th == sched->running);
    thread_sched_wakeup_running_thread(sched, next_th, will_switch);

    if (th != next_th) {
        thread_sched_del_running_thread(sched, th);
    }
}
static void
thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
{
    if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);

    RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));

    bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
    thread_sched_wakeup_next_thread(sched, th, can_switch);
}

// running -> dead (locked)
static void
thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, true);
}

// running -> dead
static void
thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_dead_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}

// running -> waiting (locked)
static void
thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, false);
}

// running -> waiting
static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_waiting_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}
static void
setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    {
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
ubf_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    // only once; safe because th->interrupt_lock is already acquired
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
    {
        if (sched->running == th) {
            // not sleeping yet
        }
        else {
            thread_sched_to_ready_common(sched, th, true, false);
        }
    }
    thread_sched_unlock(sched, th);
}
// running -> waiting, and wait until woken up explicitly
static void
thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    RB_VM_SAVE_MACHINE_CONTEXT(th);
    setup_ubf(th, ubf_waiting, (void *)th);

    thread_sched_lock(sched, th);
    {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
        }
        else {
            RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
        }
    }
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);
}
// run another thread in the ready queue; continue to run if none is ready.
static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);

    thread_sched_lock(sched, th);
    {
        if (!ccan_list_empty(&sched->readyq)) {
            thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
        }
        else {
            VM_ASSERT(sched->readyq_cnt == 0);
        }
    }
    thread_sched_unlock(sched, th);
}
void
rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
{
    rb_native_mutex_initialize(&sched->lock_);

#if VM_CHECK_MODE
    sched->lock_owner = NULL;
#endif

    ccan_list_head_init(&sched->readyq);
    sched->readyq_cnt = 0;

#if USE_MN_THREADS
    if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
#endif
}
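/* Wrap coroutine_transfer() with the AddressSanitizer fiber-switch
 * annotations.  When the current coroutine is dying (to_dead), a NULL
 * fake-stack pointer tells ASan to discard its fake stack instead of
 * expecting a return. */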
static struct coroutine_context *
coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
{
#ifdef RUBY_ASAN_ENABLED
    void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
    __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
#endif

    struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);

    /* if to_dead was passed, the caller is promising that this coroutine
     * will never be resumed */
    VM_ASSERT(!to_dead);
#ifdef RUBY_ASAN_ENABLED
    __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
                                    (const void**)&returning_from->stack_base, &returning_from->stack_size);
#endif

    return returning_from;
}

static void
thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
{
    VM_ASSERT(!nt->dedicated);
    VM_ASSERT(next_th->nt == NULL);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));

    ruby_thread_set_native(next_th);
    native_thread_assign(nt, next_th);

    coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
}

static void
thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
{
    struct rb_native_thread *nt = cth->nt;
    native_thread_assign(NULL, cth);
    RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
    thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
}
#if VM_CHECK_MODE > 0
static unsigned int
grq_size(rb_vm_t *vm, rb_ractor_t *cr)
{
    ASSERT_ractor_sched_locked(vm, cr);

    rb_ractor_t *r, *prev_r = NULL;
    unsigned int i = 0;

    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        i++;
        VM_ASSERT(r != prev_r);
        prev_r = r;
    }
    return i;
}
#endif
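/* Enqueue a ractor whose next thread has no native thread yet onto the
 * global run queue (grq) and signal one of the shared native threads
 * waiting in ractor_sched_deq(). */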
static void
ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
{
    struct rb_thread_sched *sched = &r->threads.sched;
    rb_ractor_t *cr = NULL; // the timer thread can call this function

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(sched->running->nt == NULL);

    ractor_sched_lock(vm, cr);
    {
#if VM_CHECK_MODE > 0
        // check if grq already contains r
        rb_ractor_t *tr;
        ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
            VM_ASSERT(r != tr);
        }
#endif

        ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
        vm->ractor.sched.grq_cnt++;
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);

        rb_native_cond_signal(&vm->ractor.sched.cond);
    }
    ractor_sched_unlock(vm, cr);
}

#ifndef SNT_KEEP_SECONDS
#define SNT_KEEP_SECONDS 0
#endif

#ifndef MINIMUM_SNT
// keep at least MINIMUM_SNT shared native threads (for debugging)
#define MINIMUM_SNT 0
#endif
static rb_ractor_t *
ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
{
    rb_ractor_t *r;

    ractor_sched_lock(vm, cr);
    {
        RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));

        VM_ASSERT(rb_current_execution_context(false) == NULL);
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
            RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);

#if SNT_KEEP_SECONDS > 0
            rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
            if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
                RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
                VM_ASSERT(r == NULL);

                // timeout
                vm->ractor.sched.snt_cnt--;
                vm->ractor.sched.running_cnt--;
                break;
            }
            else {
                RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
            }
#else
            ractor_sched_set_unlocked(vm, cr);
            rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
            ractor_sched_set_locked(vm, cr);

            RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
#endif
        }

        VM_ASSERT(rb_current_execution_context(false) == NULL);

        if (r) {
            VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
            vm->ractor.sched.grq_cnt--;
            RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
        }
        else {
            VM_ASSERT(SNT_KEEP_SECONDS > 0);
            // timeout
        }
    }
    ractor_sched_unlock(vm, cr);

    return r;
}
void rb_ractor_lock_self(rb_ractor_t *r);
void rb_ractor_unlock_self(rb_ractor_t *r);
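/* Sleep the current thread until rb_ractor_sched_wakeup() is called on
 * its ractor or it is interrupted; ubf is installed for the duration
 * of the sleep so a blocking wait can be cancelled. */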
void
rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
{
    // the ractor lock of cr is acquired
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
    struct rb_thread_sched *sched = TH_SCHED(th);
    cr->sync.wait.waiting_thread = th; // TODO: multi-thread

    setup_ubf(th, ubf, (void *)cr);

    thread_sched_lock(sched, th);
    {
        rb_ractor_unlock_self(cr);
        {
            if (RUBY_VM_INTERRUPTED(th->ec)) {
                RUBY_DEBUG_LOG("interrupted");
            }
            else if (cr->sync.wait.wakeup_status != wakeup_none) {
                RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
            }
            else {
                // sleep
                RB_VM_SAVE_MACHINE_CONTEXT(th);
                th->status = THREAD_STOPPED_FOREVER;

                bool can_direct_transfer = !th_has_dedicated_nt(th);
                thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
                thread_sched_wait_running_turn(sched, th, can_direct_transfer);
                th->status = THREAD_RUNNABLE;
                // woken up
            }
        }
    }
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);

    rb_ractor_lock_self(cr);
    cr->sync.wait.waiting_thread = NULL;
}
void
rb_ractor_sched_wakeup(rb_ractor_t *r)
{
    rb_thread_t *r_th = r->sync.wait.waiting_thread;
    // the ractor lock of r is acquired
    struct rb_thread_sched *sched = TH_SCHED(r_th);

    VM_ASSERT(r->sync.wait.wakeup_status != 0);

    thread_sched_lock(sched, r_th);
    {
        if (r_th->status == THREAD_STOPPED_FOREVER) {
            thread_sched_to_ready_common(sched, r_th, true, false);
        }
    }
    thread_sched_unlock(sched, r_th);
}
static bool
ractor_sched_barrier_completed_p(rb_vm_t *vm)
{
    RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
    VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
    return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
}
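/* Ractor barrier: the initiating ractor interrupts every other running
 * thread and waits until it is the only one left running, i.e. until
 * running_cnt - barrier_waiting_cnt == 1 (see
 * ractor_sched_barrier_completed_p() above). */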
void
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
    VM_ASSERT(!vm->ractor.sched.barrier_waiting);
    VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);

    RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);

    unsigned int lock_rec;

    ractor_sched_lock(vm, cr);
    {
        vm->ractor.sched.barrier_waiting = true;

        // release the VM lock
        lock_rec = vm->ractor.sync.lock_rec;
        vm->ractor.sync.lock_rec = 0;
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
        {
            // interrupt all running threads
            rb_thread_t *ith;
            ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
                if (ith->ractor != cr) {
                    RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
                    RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
                }
            }

            // wait for the other ractors
            while (!ractor_sched_barrier_completed_p(vm)) {
                ractor_sched_set_unlocked(vm, cr);
                rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
                ractor_sched_set_locked(vm, cr);
            }
        }
    }
    ractor_sched_unlock(vm, cr);

    // re-acquire the VM lock
    rb_native_mutex_lock(&vm->ractor.sync.lock);
    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;

    RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);

    ractor_sched_lock(vm, cr);
    {
        vm->ractor.sched.barrier_waiting = false;
        vm->ractor.sched.barrier_serial++;
        vm->ractor.sched.barrier_waiting_cnt = 0;
        rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
    }
    ractor_sched_unlock(vm, cr);
}
static void
ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
{
    if (ractor_sched_barrier_completed_p(vm)) {
        rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
    }
}

static void
ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
{
    VM_ASSERT(vm->ractor.sched.barrier_waiting);

    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;

    while (vm->ractor.sched.barrier_serial == barrier_serial) {
        RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
        RB_VM_SAVE_MACHINE_CONTEXT(th);

        rb_ractor_t *cr = th->ractor;
        ractor_sched_set_unlocked(vm, cr);
        rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
        ractor_sched_set_locked(vm, cr);

        RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
    }
}
void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
    VM_ASSERT(vm->ractor.sched.barrier_waiting);   // VM needs barrier sync

#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
#endif

    RUBY_DEBUG_LOG("join");

    rb_native_mutex_unlock(&vm->ractor.sync.lock);
    {
        VM_ASSERT(vm->ractor.sched.barrier_waiting); // not released yet
        VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);

        ractor_sched_lock(vm, cr);
        {
            vm->ractor.sched.barrier_waiting_cnt++;
            RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);

            ractor_sched_barrier_join_signal_locked(vm);
            ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
        }
        ractor_sched_unlock(vm, cr);
    }
    rb_native_mutex_lock(&vm->ractor.sync.lock);
}
static void clear_thread_cache_altstack(void);

static void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    clear_thread_cache_altstack();
}
#ifdef RB_THREAD_T_HAS_NATIVE_ID
static int
get_native_thread_id(void)
{
#ifdef __linux__
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();
#endif
}
#endif
#if defined(HAVE_WORKING_FORK)
static void
thread_sched_atfork(struct rb_thread_sched *sched)
{
    current_fork_gen++;
    rb_thread_sched_init(sched, true);
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = GET_VM();

    if (th_has_dedicated_nt(th)) {
        vm->ractor.sched.snt_cnt = 0;
    }
    else {
        vm->ractor.sched.snt_cnt = 1;
    }
    vm->ractor.sched.running_cnt = 0;

    rb_native_mutex_initialize(&vm->ractor.sched.lock);
#if VM_CHECK_MODE > 0
    vm->ractor.sched.lock_owner = NULL;
    vm->ractor.sched.locked = false;
#endif

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    VM_ASSERT(sched->is_running);
    sched->is_running_timeslice = false;

    if (sched->running != th) {
        thread_sched_to_running(sched, th);
    }
    else {
        thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
    }

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    if (th->nt) {
        th->nt->tid = get_native_thread_id();
    }
#endif
}
#endif
#ifdef RB_THREAD_LOCAL_SPECIFIER
static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
#else
static pthread_key_t ruby_native_thread_key;
#endif
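/* The current rb_thread_t is tracked either in a compiler-level TLS
 * variable or, on platforms without one, in a pthread_key_t slot. */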
static rb_thread_t *
ruby_thread_from_native(void)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
#else
    return pthread_getspecific(ruby_native_thread_key);
#endif
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th) {
#ifdef USE_UBF_LIST
        ccan_list_node_init(&th->sched.node.ubf);
#endif
    }

    // setup TLS

    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }

#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return 1;
#else
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
#endif
}
void
Init_native_thread(rb_thread_t *main_th)
{
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
        if (r == 0) {
            r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
        }
        if (r) condattr_monotonic = NULL;
    }
#endif

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    }
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");
    }
#endif
    ruby_posix_signal(SIGVTALRM, null_func);

    // setup vm
    rb_vm_t *vm = main_th->vm;
    rb_native_mutex_initialize(&vm->ractor.sched.lock);
    rb_native_cond_initialize(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    // setup main thread
    main_th->nt->thread_id = pthread_self();
    main_th->nt->serial = 1;
#ifdef RUBY_NT_SERIAL
    ruby_nt_serial = 1;
#endif
    ruby_thread_set_native(main_th);
    native_thread_setup(main_th->nt);
    native_thread_setup_on_thread(main_th->nt);

    TH_SCHED(main_th)->running = main_th;
    main_th->has_dedicated_nt = 1;

    thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);

    // setup main NT
    main_th->nt->dedicated = 1;
    main_th->nt->vm = vm;

    vm->ractor.sched.dnt_cnt = 1;
}
extern int ruby_mn_threads_enabled;

void
ruby_mn_threads_params(void)
{
    rb_vm_t *vm = GET_VM();
    rb_ractor_t *main_ractor = GET_RACTOR();

    const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
    bool enable_mn_threads = false;

    if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
        // enabled
        ruby_mn_threads_enabled = 1;
    }
    main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;

    const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
    const int default_max_cpu = 8; // TODO: CPU num?
    int max_cpu = default_max_cpu;

    if (USE_MN_THREADS && max_cpu_cstr) {
        int given_max_cpu = atoi(max_cpu_cstr);
        if (given_max_cpu > 0) {
            max_cpu = given_max_cpu;
        }
    }

    vm->ractor.sched.max_cpu = max_cpu;
}
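/* A native thread is "dedicated" while it must stay 1:1 with its Ruby
 * thread (e.g. the main thread, or a thread in a blocking region).
 * These helpers move a native thread between the shared (snt) and
 * dedicated (dnt) counters. */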
static void
native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
{
    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
        {
            vm->ractor.sched.snt_cnt--;
            vm->ractor.sched.dnt_cnt++;
        }
        ractor_sched_unlock(vm, cr);
    }

    nt->dedicated++;
}

static void
native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
{
    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
    VM_ASSERT(nt->dedicated > 0);
    nt->dedicated--;

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
        {
            nt->vm->ractor.sched.snt_cnt++;
            nt->vm->ractor.sched.dnt_cnt--;
        }
        ractor_sched_unlock(vm, cr);
    }
}
static void
native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
{
#if USE_RUBY_DEBUG_LOG
    if (nt) {
        if (th->nt) {
            RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
        }
        else {
            RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
        }
    }
    else {
        if (th->nt) {
            RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
        }
        else {
            RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
        }
    }
#endif

    th->nt = nt;
}

static void
native_thread_destroy(struct rb_native_thread *nt)
{
    if (nt) {
        rb_native_cond_destroy(&nt->cond.readyq);

        if (&nt->cond.readyq != &nt->cond.intr) {
            rb_native_cond_destroy(&nt->cond.intr);
        }

        RB_ALTSTACK_FREE(nt->altstack);
        ruby_xfree(nt->nt_context);
        ruby_xfree(nt);
    }
}
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#endif
#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of current thread's stack
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
# else
    guard = getpagesize();
# endif
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* macOS */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    /* Must not use thinfo.__pi_stacksize for size.
       It is around 3KB smaller than the correct size
       calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    thread_info info;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif /* STACKADDR_AVAILABLE */
static struct {
    rb_nativethread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
    RUBY_STACK_SPACE_RATIO = 5
};
static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}
static void
native_thread_init_main_thread_stack(void *addr)
{
    native_main_thread.id = pthread_self();
#ifdef RUBY_ASAN_ENABLED
    addr = asan_get_real_stack_addr((void *)addr);
#endif

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
    {
        void* stackaddr;
        size_t size;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            goto bound_check;
        }
    }
#endif
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > (VALUE *)addr,
                    native_main_thread.stack_start < (VALUE *)addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
    {
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space;
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        addr = native_main_thread.stack_start;
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }

#if MAINSTACKADDR_AVAILABLE
  bound_check:
#endif
    /* If addr is out of range of main-thread stack range estimation, it
     * should be on a coroutine (alternative stack). [Feature #2294] */
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            /* out of range */
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0; /* unknown */
        }
    }
}
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
static int
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    rb_nativethread_id_t curr = pthread_self();
#ifdef RUBY_ASAN_ENABLED
    local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
    th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
#endif

    if (!native_main_thread.id) {
        /* This thread is the first thread, must be the main thread -
         * configure the native_main_thread object */
        native_thread_init_main_thread_stack(local_in_parent_frame);
    }

    if (pthread_equal(curr, native_main_thread.id)) {
        th->ec->machine.stack_start = native_main_thread.stack_start;
        th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        if (th_has_dedicated_nt(th)) {
            void *start;
            size_t size;

            if (get_stack(&start, &size) == 0) {
                uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
                th->ec->machine.stack_start = local_in_parent_frame;
                th->ec->machine.stack_maxsize = size - diff;
            }
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }

    return 0;
}
struct nt_param {
    rb_vm_t *vm;
    struct rb_native_thread *nt;
};
static int
native_thread_create0(struct rb_native_thread *nt)
{
    int err = 0;
    pthread_attr_t attr;

    const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
    const size_t space = space_size(stack_size);

    nt->machine_stack_maxsize = stack_size - space;

#ifdef USE_SIGALTSTACK
    nt->altstack = rb_allocate_sigaltstack();
#endif

    CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
    RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
    CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
    CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
    CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

    err = pthread_create(&nt->thread_id, &attr, nt_start, nt);

    RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);

    CHECK_ERR(pthread_attr_destroy(&attr));

    return err;
}
static void
native_thread_setup(struct rb_native_thread *nt)
{
    // init condvars
    rb_native_cond_initialize(&nt->cond.readyq);

    if (&nt->cond.readyq != &nt->cond.intr) {
        rb_native_cond_initialize(&nt->cond.intr);
    }
}

static void
native_thread_setup_on_thread(struct rb_native_thread *nt)
{
    // init tid
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();
#endif

    // init signal handler
    RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
}
static struct rb_native_thread *
native_thread_alloc(void)
{
    struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
    native_thread_setup(nt);

#if USE_MN_THREADS
    nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
#endif

#if USE_RUBY_DEBUG_LOG
    static rb_atomic_t nt_serial = 2;
    nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
#endif

    return nt;
}
static int
native_thread_create_dedicated(rb_thread_t *th)
{
    th->nt = native_thread_alloc();
    th->nt->vm = th->vm;
    th->nt->running_thread = th;
    th->nt->dedicated = 1;

    // vm stack
    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.malloc_stack = true;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
    th->sched.context_stack = vm_stack;

    int err = native_thread_create0(th->nt);
    if (!err) {
        // setup
        thread_sched_to_ready(TH_SCHED(th), th);
    }
    return err;
}
static void
call_thread_start_func_2(rb_thread_t *th)
{
    /* Capture the address of a stack variable to use as the base of the
     * machine stack. */
    VALUE stack_start = 0;
    VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);

    native_thread_init_stack(th, stack_start_addr);
    thread_start_func_2(th, th->ec->machine.stack_start);
}
static void *
nt_start(void *ptr)
{
    struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
    rb_vm_t *vm = nt->vm;

    native_thread_setup_on_thread(nt);

    // init tid
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();
#endif

#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
    ruby_nt_serial = nt->serial;
#endif

    RUBY_DEBUG_LOG("nt:%u", nt->serial);

    if (!nt->dedicated) {
        coroutine_initialize_main(nt->nt_context);
    }

    while (1) {
        if (nt->dedicated) {
            // wait for the running turn
            rb_thread_t *th = nt->running_thread;
            struct rb_thread_sched *sched = TH_SCHED(th);

            RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
            ruby_thread_set_native(th);

            thread_sched_lock(sched, th);
            {
                if (sched->running == th) {
                    thread_sched_add_running_thread(sched, th);
                }
                thread_sched_wait_running_turn(sched, th, false);
            }
            thread_sched_unlock(sched, th);

            // start the thread
            call_thread_start_func_2(th);
            break;
        }
        else {
            RUBY_DEBUG_LOG("check next");
            rb_ractor_t *r = ractor_sched_deq(vm, NULL);

            if (r) {
                struct rb_thread_sched *sched = &r->threads.sched;

                thread_sched_lock(sched, NULL);
                {
                    rb_thread_t *next_th = sched->running;

                    if (next_th && next_th->nt == NULL) {
                        RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
                        thread_sched_switch0(nt->nt_context, next_th, nt, false);
                    }
                    else {
                        RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
                    }
                }
                thread_sched_unlock(sched, NULL);
            }
            else {
                // timeout -> the shared native thread is deleted
                break;
            }

            if (nt->dedicated) {
                // the SNT became a DNT while running
                break;
            }
        }
    }

    return NULL;
}
static int native_thread_create_shared(rb_thread_t *th);

#if USE_MN_THREADS
static void nt_free_stack(void *mstack);
#endif
void
rb_threadptr_remove(rb_thread_t *th)
{
#if USE_MN_THREADS
    if (th->sched.malloc_stack) {
        // dedicated
        return;
    }
    else {
        rb_vm_t *vm = th->vm;
        th->sched.finished = false;

        RB_VM_LOCK_ENTER();
        {
            ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
        }
        RB_VM_LOCK_LEAVE();
    }
#endif
}
void
rb_threadptr_sched_free(rb_thread_t *th)
{
#if USE_MN_THREADS
    if (th->sched.malloc_stack) {
        // has a dedicated native thread
        ruby_xfree(th->sched.context_stack);
        native_thread_destroy(th->nt);
    }
    else {
        nt_free_stack(th->sched.context_stack);
    }

    ruby_xfree(th->sched.context);
    th->sched.context = NULL;
#else
    ruby_xfree(th->sched.context_stack);
    native_thread_destroy(th->nt);
#endif

    th->nt = NULL;
}
void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
        rb_thread_t *zombie_th, *next_zombie_th;
        ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
            if (zombie_th->sched.finished) {
                ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
            }
            else {
                rb_gc_mark(zombie_th->self);
            }
        }
    }
}
static int
native_thread_create(rb_thread_t *th)
{
    VM_ASSERT(th->nt == 0);
    RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);

    if (!th->ractor->threads.sched.enable_mn_threads) {
        th->has_dedicated_nt = 1;
    }

    if (th->has_dedicated_nt) {
        return native_thread_create_dedicated(th);
    }
    else {
        return native_thread_create_shared(th);
    }
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;

    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->nt->thread_id, policy, &sp);
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
    rb_native_cond_signal(&th->nt->cond.intr);
}
static void
native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->nt->cond.intr;

    /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
     * current_time + 100,000,000 seconds, so clamp the timeout; the resulting
     * early return is treated as a spurious wakeup, which callers of
     * native_sleep must tolerate anyway. See [Bug #1341]. */
    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;

    THREAD_BLOCKING_BEGIN(th);
    {
        rb_native_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted; return immediately */
            RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
        }
        else {
            if (!rel) {
                rb_native_cond_wait(cond, lock);
            }
            else {
                rb_hrtime_t end;

                if (*rel > max) {
                    *rel = max;
                }

                end = native_cond_timeout(cond, *rel);
                native_cond_timedwait(cond, lock, &end);
            }
        }
        th->unblock.func = 0;

        rb_native_mutex_unlock(lock);
    }
    THREAD_BLOCKING_END(th);

    RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
}
#ifdef USE_UBF_LIST
static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;

static void
ubf_list_atfork(void)
{
    ccan_list_head_init(&ubf_list_head);
    rb_native_mutex_initialize(&ubf_list_lock);
}

RBIMPL_ATTR_MAYBE_UNUSED()
static bool
ubf_list_contain_p(rb_thread_t *th)
{
    rb_thread_t *list_th;
    ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
        if (list_th == th) return true;
    }
    return false;
}
/* The thread 'th' is registered to be trying unblock. */
static void
register_ubf_list(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    VM_ASSERT(th->unblock.func != NULL);

    rb_native_mutex_lock(&ubf_list_lock);
    {
        // check not connected yet
        if (ccan_list_empty((struct ccan_list_head*)node)) {
            VM_ASSERT(!ubf_list_contain_p(th));
            ccan_list_add(&ubf_list_head, node);
        }
    }
    rb_native_mutex_unlock(&ubf_list_lock);

    timer_thread_wakeup();
}

/* The thread 'th' is unblocked. It no longer needs to be registered. */
static void
unregister_ubf_list(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    /* we can't allow re-entry into ubf_list_head */
    VM_ASSERT(th->unblock.func == NULL);

    if (!ccan_list_empty((struct ccan_list_head*)node)) {
        rb_native_mutex_lock(&ubf_list_lock);
        {
            VM_ASSERT(ubf_list_contain_p(th));
            ccan_list_del_init(node);
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}
/*
 * Send a signal so that a target thread returns from a blocking syscall.
 * Any signal would do, but SIGVTALRM is chosen.
 */
static void
ubf_wakeup_thread(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);

    pthread_kill(th->nt->thread_id, SIGVTALRM);
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
    ubf_wakeup_thread(th);
    register_ubf_list(th);
}
static bool
ubf_threads_empty(void)
{
    return ccan_list_empty(&ubf_list_head) != 0;
}

static void
ubf_wakeup_all_threads(void)
{
    if (!ubf_threads_empty()) {
        rb_thread_t *th;
        rb_native_mutex_lock(&ubf_list_lock);
        {
            ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
                ubf_wakeup_thread(th);
            }
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}
#else /* USE_UBF_LIST */
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0
static void ubf_wakeup_all_threads(void) { return; }
static bool ubf_threads_empty(void) { return true; }
#define ubf_list_atfork() do {} while (0)
#endif /* USE_UBF_LIST */
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
void
rb_thread_wakeup_timer_thread(int sig)
{
    // This function can be called from signal handlers, so
    // pthread_mutex_lock() must not be used here.

    // wake up the timer thread
    timer_thread_wakeup_force();

    // interrupt the main thread if it is available
    if (system_working) {
        rb_vm_t *vm = GET_VM();
        rb_thread_t *main_th = vm->ractor.main_thread;

        if (main_th) {
            volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);

            if (main_th_ec) {
                RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);

                if (vm->ubf_async_safe && main_th->unblock.func) {
                    (main_th->unblock.func)(main_th->unblock.arg);
                }
            }
        }
    }
}
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)
static void
close_invalidate(int *fdp, const char *msg)
{
    int fd = *fdp;

    *fdp = -1;
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
    }
}

static void
close_invalidate_pair(int fds[2], const char *msg)
{
    if (USE_EVENTFD && fds[0] == fds[1]) {
        fds[1] = -1; // disable the write side first
        close_invalidate(&fds[0], msg);
    }
    else {
        close_invalidate(&fds[1], msg);
        close_invalidate(&fds[0], msg);
    }
}
static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}
/* communication pipe with the timer thread and signal handler */
static void
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] > 0 || pipes[1] > 0) { /* close pipes of the parent process */
        VM_ASSERT(pipes[0] > 0);
        VM_ASSERT(pipes[1] > 0);
        close_invalidate_pair(pipes, "close pipe of parent process");
    }

#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);

    if (pipes[0] >= 0) {
        rb_update_max_fd(pipes[0]);
        return;
    }
#endif

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_bug("can not create communication pipe");
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#endif

enum {
    THREAD_NAME_MAX =
#if defined(__linux__)
    16
#elif defined(__APPLE__)
/* Undocumented, and the main thread seems unlimited */
    64
#else
    16
#endif
};

static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
static void
native_set_thread_name(rb_thread_t *th)
{
#ifdef SET_CURRENT_THREAD_NAME
    VALUE loc;
    if (!NIL_P(loc = th->name)) {
        SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
    }
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char *name, *p;
        char buf[THREAD_NAME_MAX];
        size_t len;
        int n;

        name = RSTRING_PTR(RARRAY_AREF(loc, 0));
        p = strrchr(name, '/'); /* show only the basename of the path */
        if (p && p[1])
            name = p + 1;

        n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
        RB_GC_GUARD(loc);

        len = (size_t)n;
        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        }
        SET_CURRENT_THREAD_NAME(buf);
    }
#endif
}
static void
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
{
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
    const char *s = "";
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
# endif
    if (!NIL_P(name)) {
        long n;
        RSTRING_GETMEM(name, s, n);
        if (n >= (int)sizeof(buf)) {
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
            s = buf;
        }
    }
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
# endif
#endif
}
#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
static VALUE
native_thread_native_thread_id(rb_thread_t *target_th)
{
    if (!target_th->nt) return Qnil;

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->nt->tid;
    if (tid == 0) return Qnil;
    return INT2FIX(tid);
#elif defined(__APPLE__)
    uint64_t tid;
/* The first condition is needed because MAC_OS_X_VERSION_10_6
   is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
   i386 would be broken without it. */
# if (!defined(MAC_OS_X_VERSION_10_6) || \
      (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
      defined(__POWERPC__) /* never defined for PowerPC platforms */)
    const bool no_pthread_threadid_np = true;
#   define NO_PTHREAD_MACH_THREAD_NP 1
# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
    const bool no_pthread_threadid_np = false;
# else
#   if !(defined(__has_attribute) && __has_attribute(availability))
    /* __API_AVAILABLE macro does nothing on gcc */
    __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
#   endif
    /* Check the weakly linked symbol */
    const bool no_pthread_threadid_np = !&pthread_threadid_np;
# endif
    if (no_pthread_threadid_np) {
        return ULL2NUM(pthread_mach_thread_np(pthread_self()));
    }
# ifndef NO_PTHREAD_MACH_THREAD_NP
    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
    if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
    return ULL2NUM((unsigned long long)tid);
# endif
#endif
}
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
#else
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
#endif
static struct {
    /* monitors timers and incoming events */
    rb_serial_t created_fork_gen;
    pthread_t pthread_id;

    int comm_fds[2]; // r, w

#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
    int event_fd; // kernel event queue fd (epoll/kqueue)
#endif
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
#define EPOLL_EVENTS_MAX 0x10
    struct epoll_event finished_events[EPOLL_EVENTS_MAX];
#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
#define KQUEUE_EVENTS_MAX 0x10
    struct kevent finished_events[KQUEUE_EVENTS_MAX];
#endif

    // waiting threads list
    struct ccan_list_head waiting; // waiting threads in ractors
    pthread_mutex_t waiting_lock;
} timer_th = {
    .created_fork_gen = 0,
};

#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)

static void timer_thread_check_timeslice(rb_vm_t *vm);
static int timer_thread_set_timeout(rb_vm_t *vm);
static void timer_thread_wakeup_thread(rb_thread_t *th);

#include "thread_pthread_mn.c"
static rb_thread_t *
thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
{
    if (w) {
        return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
    }
    else {
        return NULL;
    }
}
static int
timer_thread_set_timeout(rb_vm_t *vm)
{
    int timeout = -1;

    ractor_sched_lock(vm, NULL);
    {
        if (   !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1) serve time slices to active NTs
            || !ubf_threads_empty()                                  // (2) periodic UBF
            || vm->ractor.sched.grq_cnt > 0                          // (3) lazy GRQ deq start
            ) {

            RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
                           !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
                           !ubf_threads_empty(),
                           (vm->ractor.sched.grq_cnt > 0));

            timeout = 10; // ms
            vm->ractor.sched.timeslice_wait_inf = false;
        }
        else {
            vm->ractor.sched.timeslice_wait_inf = true;
        }
    }
    ractor_sched_unlock(vm, NULL);

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
        struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
        rb_thread_t *th = thread_sched_waiting_thread(w);

        if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
            rb_hrtime_t now = rb_hrtime_now();
            rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);

            RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);

            // TODO: overflow?
            int thread_timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms

            if (timeout < 0 || thread_timeout < timeout) {
                timeout = thread_timeout;
            }
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);

    RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);

    return timeout;
}
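/* The timer thread multiplexes three duties: forwarding buffered POSIX
 * signals to the main thread, waking sleeping threads whose deadline
 * has passed, and setting timer interrupts on running threads so they
 * yield their time slice. */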
static void
timer_thread_check_signal(rb_vm_t *vm)
{
    int signum = rb_signal_buff_size();
    if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
        RUBY_DEBUG_LOG("signum:%d", signum);
        threadptr_trap_interrupt(vm->ractor.main_thread);
    }
}

static bool
timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
{
    if (abs < now) {
        return true;
    }
    else if (abs - now < RB_HRTIME_PER_MSEC) {
        return true; // too short time
    }
    else {
        return false;
    }
}
static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{
    struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);

    if (w != NULL &&
        (w->flags & thread_sched_waiting_timeout) &&
        timer_thread_check_exceed(w->data.timeout, now)) {

        RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));

        // delete from the waiting list
        ccan_list_del_init(&w->node);

        // setup the result
        w->flags = thread_sched_waiting_none;
        w->data.result = 0;

        return thread_sched_waiting_thread(w);
    }

    return NULL;
}
static void
timer_thread_wakeup_thread(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        if (sched->running != th) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            // will release the execution right by itself
        }
    }
    thread_sched_unlock(sched, th);
}
static void
timer_thread_check_timeout(rb_vm_t *vm)
{
    rb_hrtime_t now = rb_hrtime_now();
    rb_thread_t *th;

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
        while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
            timer_thread_wakeup_thread(th);
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);
}

static void
timer_thread_check_timeslice(rb_vm_t *vm)
{
    rb_thread_t *th;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
        RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
        RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
    }
}
void
rb_assert_sig(void)
{
    sigset_t oldmask;
    pthread_sigmask(0, NULL, &oldmask);
    if (sigismember(&oldmask, SIGVTALRM)) {
        rb_bug("!!!");
    }
    else {
        RUBY_DEBUG_LOG("ok");
    }
}
static void *
timer_thread_func(void *ptr)
{
    rb_vm_t *vm = (rb_vm_t *)ptr;
#if defined(RUBY_NT_SERIAL)
    ruby_nt_serial = (rb_atomic_t)-1;
#endif

    RUBY_DEBUG_LOG("started%s", "");

    while (system_working) {
        timer_thread_check_signal(vm);
        timer_thread_check_timeout(vm);
        ubf_wakeup_all_threads();

        RUBY_DEBUG_LOG("system_working:%d", system_working);
        timer_thread_polling(vm);
    }

    RUBY_DEBUG_LOG("terminated");
    return NULL;
}
/* only use a signal-safe function here */
static void
signal_communication_pipe(int fd)
{
#if USE_EVENTFD
    const uint64_t buff = 1;
#else
    const char buff = '!';
#endif
    ssize_t result;

    /* already opened */
    if (fd >= 0) {
      retry:
        if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
            int e = errno;
            switch (e) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        // ignore wakeup
    }
}

static void
timer_thread_wakeup_force(void)
{
    // can be called within signal handlers, so RUBY_DEBUG_LOG() is not used
    signal_communication_pipe(timer_th.comm_fds[1]);
}
static void
timer_thread_wakeup_locked(rb_vm_t *vm)
{
    // should be locked before calling this function
    ASSERT_ractor_sched_locked(vm, NULL);

    if (timer_th.created_fork_gen == current_fork_gen) {
        if (vm->ractor.sched.timeslice_wait_inf) {
            RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
            timer_thread_wakeup_force();
        }
        else {
            RUBY_DEBUG_LOG("will be wakeup...");
        }
    }
}

static void
timer_thread_wakeup(void)
{
    rb_vm_t *vm = GET_VM();

    ractor_sched_lock(vm, NULL);
    {
        timer_thread_wakeup_locked(vm);
    }
    ractor_sched_unlock(vm, NULL);
}
static void
rb_thread_create_timer_thread(void)
{
    rb_serial_t created_fork_gen = timer_th.created_fork_gen;

    RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);

    timer_th.created_fork_gen = current_fork_gen;

    if (created_fork_gen != current_fork_gen) {
        if (created_fork_gen != 0) {
            RUBY_DEBUG_LOG("forked child process");

            // the inherited descriptors belong to the parent; close and reopen them
            CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
            close_invalidate(&timer_th.event_fd, "close event_fd");
#endif
        }

        ccan_list_head_init(&timer_th.waiting);

        // open the communication channel
        setup_communication_pipe_internal(timer_th.comm_fds);

        // open the event fd for MN scheduling
        timer_thread_setup_mn();
    }

    pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
}
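/* Sketch of the fork-generation pattern used by rb_thread_create_timer_thread()
 * (illustrative; names are hypothetical): a global generation counter is bumped
 * in the child after fork(2), and a resource tagged with the generation it was
 * created under is torn down and recreated once the tags stop matching. */
#if 0
#include <unistd.h>

static unsigned long current_gen = 1; /* bumped in the child, e.g. via pthread_atfork() */
static unsigned long created_gen = 0; /* generation the cached fd was created under */
static int cached_fd = -1;

static void
atfork_child(void)
{
    current_gen++;
}

static void
ensure_fresh_fd(void)
{
    if (created_gen != current_gen) {
        if (cached_fd >= 0) close(cached_fd); /* inherited from the parent: stale */
        /* ... reopen cached_fd for this process ... */
        created_gen = current_gen;
    }
}
#endif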
static int
native_stop_timer_thread(void)
{
    int stopped;
    stopped = --system_working <= 0;

    if (stopped) {
        RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
        timer_thread_wakeup_force();
        RUBY_DEBUG_LOG("wakeup sent");
        pthread_join(timer_th.pthread_id, NULL);
    }

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    return stopped;
}
static void
native_reset_timer_thread(void)
{
    // nothing to do
}
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
# ifdef __APPLE__
        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
            struct rlimit rlim;
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
            }
        }
# endif
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
    }
    else
#endif
    if (th) {
        size = th->ec->machine.stack_maxsize;
        base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
    }
    else {
        return 0;
    }

    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif
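/* Worked reading of the downward-growth branch above (illustrative only;
 * names are hypothetical): after the adjustments, `base` is the low end of
 * the stack and `size` is the watermark margin, clamped so that `base - size`
 * cannot underflow. A fault address counts as a stack overflow when it falls
 * within that margin at or below the low end, where the guard region sits. */
#if 0
#include <stddef.h>

static int
hit_guard_region_down(const char *low_end, size_t margin, const void *fault_addr)
{
    const char *p = (const char *)fault_addr;
    if (margin > (size_t)low_end) margin = (size_t)low_end; /* same clamp as above */
    return p > low_end - margin && p <= low_end;
}
#endif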
int
rb_reserved_fd_p(int fd)
{
    if (fd < 0) return 0;

    if (fd == timer_th.comm_fds[0] ||
        fd == timer_th.comm_fds[1]
#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
        || fd == timer_th.event_fd
#endif
        ) {
        goto check_fork_gen;
    }
    return 0;

  check_fork_gen:
    if (timer_th.created_fork_gen == current_fork_gen) {
        // the descriptors belong to the current process, so they are reserved
        return 1;
    }
    else {
        return 0;
    }
}
rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}
#if defined(USE_POLL) && !defined(HAVE_PPOLL)
/* TODO: don't ignore sigmask */
static int
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)
{
    int timeout_ms;

    if (ts) {
        int tmp, tmp2;

        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
        else {
            tmp = (int)(ts->tv_sec * 1000);
            /* round up 1ns to 1ms to avoid a busy loop for sub-millisecond waits */
            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
            else
                timeout_ms = (int)(tmp + tmp2);
        }
    }
    else
        timeout_ms = -1;

    return poll(fds, nfds, timeout_ms);
}
#  define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
#endif
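/* Usage sketch for the ruby_ppoll() fallback above (illustrative only): the
 * conversion rounds any nonzero nanosecond remainder up to the next
 * millisecond, so a 1 ns request becomes a 1 ms poll(2) timeout instead of
 * a busy spin, and values that would overflow an int saturate at INT_MAX. */
#if 0
#include <poll.h>
#include <time.h>

static int
wait_readable_briefly(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 1 }; /* rounds up to 1 ms */
    return ppoll(&pfd, 1, &ts, NULL); /* ruby_ppoll() in this configuration */
}
#endif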
#define THREAD_BLOCKING_YIELD(th) do { \
    const rb_thread_t *next_th; \
    struct rb_thread_sched *sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting(sched, (th)); \
    next_th = sched->running; \
    rb_native_mutex_unlock(&sched->lock_); \
    native_thread_yield(); \
    if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \
    } \
} while (0)
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
    if (rel) {
        if (th_has_dedicated_nt(th)) {
            native_cond_sleep(th, rel);
        }
        else {
            // MN threads register a timeout entry and wait on the timer thread
            thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
        }
    }
    else {
        thread_sched_to_waiting_until_wakeup(sched, th);
    }

    RUBY_DEBUG_LOG("wakeup");
}
// fork-safety lock: readers are operations that must not be interleaved
// with fork(2); the writer is fork itself.
static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

void
rb_thread_release_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}

void
rb_thread_reset_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_destroy", r);
    }

    if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
        rb_bug_errno("pthread_rwlock_init", r);
    }
}

void *
rb_thread_prevent_fork(void *(*func)(void *), void *data)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }
    void *result = func(data);
    rb_thread_release_fork_lock();
    return result;
}
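/* Usage sketch (illustrative; the callback and payload are hypothetical):
 * rb_thread_prevent_fork() runs `func` while holding the fork rwlock for
 * reading, so fork(2), which takes the same lock for writing through
 * rb_thread_acquire_fork_lock(), cannot happen in the middle of `func`. */
#if 0
static void *
do_fork_unsafe_work(void *data)
{
    /* e.g. mutate process-global state that must not be torn by a fork */
    return data;
}

static void
example_prevent_fork(void)
{
    int payload = 42;
    rb_thread_prevent_fork(do_fork_unsafe_work, &payload);
}
#endif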
void
rb_thread_acquire_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }
}
// thread internal event hooks (only for pthread)

struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;
    rb_event_flag_t event;
    void *user_data;

    struct rb_internal_thread_event_hook *next;
};

static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
    hook->callback = callback;
    hook->user_data = user_data;
    hook->event = internal_event;

    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    hook->next = rb_internal_thread_event_hooks;
    ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
    return hook;
}

bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t *hook)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    bool success = FALSE;

    if (rb_internal_thread_event_hooks == hook) {
        // the hook is the head of the list
        ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
        success = TRUE;
    }
    else {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;

        do {
            if (h->next == hook) {
                h->next = hook->next;
                success = TRUE;
                break;
            }
        } while ((h = h->next));
    }

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }

    if (success) {
        ruby_xfree(hook);
    }
    return success;
}

static void
rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }

    if (rb_internal_thread_event_hooks) {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
        do {
            if (h->event & event) {
                rb_internal_thread_event_data_t event_data = {
                    .thread = th->self,
                };
                (*h->callback)(event, &event_data, h->user_data);
            }
        } while ((h = h->next));
    }

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}

// Returns true if the current thread just acquired a dedicated native
// thread; false if it already had one.
bool
rb_thread_lock_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();
    bool is_snt = th->nt->dedicated == 0;
    native_thread_dedicated_inc(th->vm, th->ractor, th->nt);

    return is_snt;
}
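/* Usage sketch (illustrative only): a C extension can observe GVL activity
 * through the public hook API implemented above, declared in ruby/thread.h.
 * The callback may run on any thread and without the GVL, so it must not
 * call into Ruby; plain C I/O is used here purely for demonstration. */
#if 0
#include <ruby/thread.h>
#include <stdio.h>

static rb_internal_thread_event_hook_t *hook;

static void
gvl_event_cb(rb_event_flag_t event, const rb_internal_thread_event_data_t *event_data, void *user_data)
{
    if (event & RUBY_INTERNAL_THREAD_EVENT_RESUMED) {
        fputs("a thread acquired the GVL\n", stderr);
    }
}

static void
install_hook(void)
{
    hook = rb_internal_thread_add_event_hook(
        gvl_event_cb,
        RUBY_INTERNAL_THREAD_EVENT_READY | RUBY_INTERNAL_THREAD_EVENT_RESUMED,
        NULL);
}

static void
uninstall_hook(void)
{
    if (hook && rb_internal_thread_remove_event_hook(hook)) hook = NULL;
}
#endif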