Ruby 3.3.5p100 (2024-09-03 revision ef084cc8f4958c1b6e4ead99136631bef6d8ddba)
thread.c
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as the traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Using pthreads (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
21 Using pthreads; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of models 1 and 2
25
26 model 5: M:N User:Native threads with fine grain lock
27 Combination of models 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
32 Only the thread that holds the mutex (GVL: Global VM Lock, or Giant VM Lock) can run.
33 On thread scheduling, the running thread releases the GVL. If the running thread
34 tries a blocking operation, it must release the GVL so that another
35 thread can continue. After the blocking operation, the thread
36 must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Every VM can run in parallel.
39
40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 Every thread runs concurrently or in parallel, and exclusive access
46 control is needed to access shared objects. For example, to access a
47 String or an Array object, a fine grain lock must be taken every time.
48 */
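
/*
 * An illustrative sketch of the model 2 pattern described above. This is
 * pseudocode, not the actual implementation (see BLOCKING_REGION and
 * rb_nogvl() below); acquire_gvl(), release_gvl(), work_remaining(),
 * need_blocking_operation() and do_blocking_call() are hypothetical helpers.
 *
 *   acquire_gvl();
 *   while (work_remaining()) {
 *       if (need_blocking_operation()) {
 *           release_gvl();              // other Ruby threads may run now
 *           do_blocking_call();         // must not touch Ruby objects here
 *           acquire_gvl();
 *           RUBY_VM_CHECK_INTS(ec);     // check interrupts after blocking
 *       }
 *   }
 *   release_gvl();
 */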
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later with _FORTIFY_SOURCE > 0 set.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). So, with HAVE_RB_FD_INIT enabled,
57 * it doesn't work correctly and makes the program abort. Therefore we need
58 * to disable _FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/gc.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
86#include "ruby/fiber/scheduler.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "rjit.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#if USE_RJIT && defined(HAVE_SYS_WAIT_H)
104#include <sys/wait.h>
105#endif
106
107#ifndef USE_NATIVE_THREAD_PRIORITY
108#define USE_NATIVE_THREAD_PRIORITY 0
109#define RUBY_THREAD_PRIORITY_MAX 3
110#define RUBY_THREAD_PRIORITY_MIN -3
111#endif
112
113static VALUE rb_cThreadShield;
114
115static VALUE sym_immediate;
116static VALUE sym_on_blocking;
117static VALUE sym_never;
118
119#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
120#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
121
122static inline VALUE
123rb_thread_local_storage(VALUE thread)
124{
125 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
126 rb_ivar_set(thread, idLocals, rb_hash_new());
127 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
128 }
129 return rb_ivar_get(thread, idLocals);
130}
131
132enum SLEEP_FLAGS {
133 SLEEP_DEADLOCKABLE = 0x01,
134 SLEEP_SPURIOUS_CHECK = 0x02,
135 SLEEP_ALLOW_SPURIOUS = 0x04,
136 SLEEP_NO_CHECKINTS = 0x08,
137};
138
139static void sleep_forever(rb_thread_t *th, unsigned int fl);
140static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
141
142static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
143static int rb_threadptr_dead(rb_thread_t *th);
144static void rb_check_deadlock(rb_ractor_t *r);
145static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
146static const char *thread_status_name(rb_thread_t *th, int detail);
147static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
148NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
149MAYBE_UNUSED(static int consume_communication_pipe(int fd));
150
151static volatile int system_working = 1;
152static rb_internal_thread_specific_key_t specific_key_count;
153
154struct waiting_fd {
153
155 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156 rb_thread_t *th;
157 int fd;
158 struct rb_io_close_wait_list *busy;
159};
160
161/********************************************************************************/
162
163#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
164
165struct rb_blocking_region_buffer {
164
166 enum rb_thread_status prev_status;
167};
168
169static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
170static void unblock_function_clear(rb_thread_t *th);
171
172static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
173 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
174static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
175
176#define THREAD_BLOCKING_BEGIN(th) do { \
177 struct rb_thread_sched * const sched = TH_SCHED(th); \
178 RB_VM_SAVE_MACHINE_CONTEXT(th); \
179 thread_sched_to_waiting((sched), (th));
180
181#define THREAD_BLOCKING_END(th) \
182 thread_sched_to_running((sched), (th)); \
183 rb_ractor_thread_switch(th->ractor, th); \
184} while(0)
185
186#ifdef __GNUC__
187#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
188#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
189#else
190#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
191#endif
192#else
193#define only_if_constant(expr, notconst) notconst
194#endif
195#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
196 struct rb_blocking_region_buffer __region; \
197 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
198 /* always return true unless fail_if_interrupted */ \
199 !only_if_constant(fail_if_interrupted, TRUE)) { \
200 /* Important that this is inlined into the macro, and not part of \
201 * blocking_region_begin - see bug #20493 */ \
202 RB_VM_SAVE_MACHINE_CONTEXT(th); \
203 thread_sched_to_waiting(TH_SCHED(th), th); \
204 exec; \
205 blocking_region_end(th, &__region); \
206 }; \
207} while(0)
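
/*
 * Illustrative use of BLOCKING_REGION (a sketch mirroring rb_nogvl()
 * below; func, data1, ubf, data2 and the locals are assumed to be in
 * scope):
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);              // runs without the GVL
 *       saved_errno = errno;
 *   }, ubf, data2, FALSE);
 */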
208
209/*
210 * returns true if this thread was spuriously interrupted, false otherwise
211 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
212 */
213#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
214static inline int
215vm_check_ints_blocking(rb_execution_context_t *ec)
216{
217 rb_thread_t *th = rb_ec_thread_ptr(ec);
218
219 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
220 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
221 }
222 else {
223 th->pending_interrupt_queue_checked = 0;
224 RUBY_VM_SET_INTERRUPT(ec);
225 }
226 return rb_threadptr_execute_interrupts(th, 1);
227}
228
229int
230rb_vm_check_ints_blocking(rb_execution_context_t *ec)
231{
232 return vm_check_ints_blocking(ec);
233}
234
235/*
236 * poll() is supported by many OSes, but so far Linux is the only
237 * one we know of that supports using poll() in all places select()
238 * would work.
239 */
240#if defined(HAVE_POLL)
241# if defined(__linux__)
242# define USE_POLL
243# endif
244# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
245# define USE_POLL
246 /* FreeBSD does not set POLLOUT when POLLHUP happens */
247# define POLLERR_SET (POLLHUP | POLLERR)
248# endif
249#endif
250
251static void
252timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
253 const struct timeval *timeout)
254{
255 if (timeout) {
256 *rel = rb_timeval2hrtime(timeout);
257 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
258 *to = rel;
259 }
260 else {
261 *to = 0;
262 }
263}
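
/*
 * Illustrative use of timeout_prepare() (a sketch; the 1.5s value is
 * arbitrary): it converts a relative struct timeval into the rel/end
 * pair used by the wait loops below, and hrtime_update_expire()
 * detects expiry.
 *
 *   struct timeval tv = { 1, 500000 };
 *   rb_hrtime_t rel, end, *to;
 *   timeout_prepare(&to, &rel, &end, &tv);
 *   // ... wait using *to as the remaining time; after each wakeup:
 *   if (hrtime_update_expire(to, end)) {
 *       // the deadline has passed
 *   }
 */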
264
265MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
266MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
267MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
268
269#include THREAD_IMPL_SRC
270
271/*
272 * TODO: somebody with win32 knowledge should be able to get rid of
273 * timer-thread by busy-waiting on signals. And it should be possible
274 * to make the GVL in thread_pthread.c be platform-independent.
275 */
276#ifndef BUSY_WAIT_SIGNALS
277# define BUSY_WAIT_SIGNALS (0)
278#endif
279
280#ifndef USE_EVENTFD
281# define USE_EVENTFD (0)
282#endif
283
284#include "thread_sync.c"
285
286void
287rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
288{
289 rb_native_mutex_initialize(lock);
290}
291
292void
293rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
294{
295 rb_native_mutex_destroy(lock);
296}
297
298void
299rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
300{
301 rb_native_mutex_lock(lock);
302}
303
304void
305rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
306{
307 rb_native_mutex_unlock(lock);
308}
309
310static int
311unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
312{
313 do {
314 if (fail_if_interrupted) {
315 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
316 return FALSE;
317 }
318 }
319 else {
320 RUBY_VM_CHECK_INTS(th->ec);
321 }
322
323 rb_native_mutex_lock(&th->interrupt_lock);
324 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
325 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
326
327 VM_ASSERT(th->unblock.func == NULL);
328
329 th->unblock.func = func;
330 th->unblock.arg = arg;
331 rb_native_mutex_unlock(&th->interrupt_lock);
332
333 return TRUE;
334}
335
336static void
337unblock_function_clear(rb_thread_t *th)
338{
339 rb_native_mutex_lock(&th->interrupt_lock);
340 th->unblock.func = 0;
341 rb_native_mutex_unlock(&th->interrupt_lock);
342}
343
344static void
345rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
346{
347 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
348
349 rb_native_mutex_lock(&th->interrupt_lock);
350 {
351 if (trap) {
352 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
353 }
354 else {
355 RUBY_VM_SET_INTERRUPT(th->ec);
356 }
357
358 if (th->unblock.func != NULL) {
359 (th->unblock.func)(th->unblock.arg);
360 }
361 else {
362 /* none */
363 }
364 }
365 rb_native_mutex_unlock(&th->interrupt_lock);
366}
367
368void
369rb_threadptr_interrupt(rb_thread_t *th)
370{
371 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
372 rb_threadptr_interrupt_common(th, 0);
373}
374
375static void
376threadptr_trap_interrupt(rb_thread_t *th)
377{
378 rb_threadptr_interrupt_common(th, 1);
379}
380
381static void
382terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
383{
384 rb_thread_t *th = 0;
385
386 ccan_list_for_each(&r->threads.set, th, lt_node) {
387 if (th != main_thread) {
388 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
389
390 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
391 rb_threadptr_interrupt(th);
392
393 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
394 }
395 else {
396 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
397 }
398 }
399}
400
401static void
402rb_threadptr_join_list_wakeup(rb_thread_t *thread)
403{
404 while (thread->join_list) {
405 struct rb_waiting_list *join_list = thread->join_list;
406
407 // Consume the entry from the join list:
408 thread->join_list = join_list->next;
409
410 rb_thread_t *target_thread = join_list->thread;
411
412 if (target_thread->scheduler != Qnil && join_list->fiber) {
413 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
414 }
415 else {
416 rb_threadptr_interrupt(target_thread);
417
418 switch (target_thread->status) {
419 case THREAD_STOPPED:
420 case THREAD_STOPPED_FOREVER:
421 target_thread->status = THREAD_RUNNABLE;
422 break;
423 default:
424 break;
425 }
426 }
427 }
428}
429
430void
431rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
432{
433 while (th->keeping_mutexes) {
434 rb_mutex_t *mutex = th->keeping_mutexes;
435 th->keeping_mutexes = mutex->next_mutex;
436
437 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
438
439 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
440 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
441 }
442}
443
444void
445rb_thread_terminate_all(rb_thread_t *th)
446{
447 rb_ractor_t *cr = th->ractor;
448 rb_execution_context_t * volatile ec = th->ec;
449 volatile int sleeping = 0;
450
451 if (cr->threads.main != th) {
452 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
453 (void *)cr->threads.main, (void *)th);
454 }
455
456 /* unlock all locking mutexes */
457 rb_threadptr_unlock_all_locking_mutexes(th);
458
459 EC_PUSH_TAG(ec);
460 if (EC_EXEC_TAG() == TAG_NONE) {
461 retry:
462 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
463
464 terminate_all(cr, th);
465
466 while (rb_ractor_living_thread_num(cr) > 1) {
467 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
468 /*
469 * The thread-exiting routine in thread_start_func_2 notifies
470 * us when the last sub-thread exits.
471 */
472 sleeping = 1;
473 native_sleep(th, &rel);
474 RUBY_VM_CHECK_INTS_BLOCKING(ec);
475 sleeping = 0;
476 }
477 }
478 else {
479 /*
480 * When an exception is caught (e.g. Ctrl+C), broadcast the kill
481 * request again to ensure all threads are killed, even if they
482 * are blocked on sleep, mutex, etc.
483 */
484 if (sleeping) {
485 sleeping = 0;
486 goto retry;
487 }
488 }
489 EC_POP_TAG();
490}
491
492void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
493
494static void
495thread_cleanup_func_before_exec(void *th_ptr)
496{
497 rb_thread_t *th = th_ptr;
498 th->status = THREAD_KILLED;
499
500 // The thread stack doesn't exist in the forked process:
501 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
502
503 rb_threadptr_root_fiber_terminate(th);
504}
505
506static void
507thread_cleanup_func(void *th_ptr, int atfork)
508{
509 rb_thread_t *th = th_ptr;
510
511 th->locking_mutex = Qfalse;
512 thread_cleanup_func_before_exec(th_ptr);
513
514 /*
515 * Unfortunately, we can't release native threading resources at fork,
516 * because libc may be in an unstable locking state; touching a
517 * threading resource could therefore cause a deadlock.
518 */
519 if (atfork) {
520 th->nt = NULL;
521 return;
522 }
523
524 rb_native_mutex_destroy(&th->interrupt_lock);
525}
526
527static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
528static VALUE rb_thread_to_s(VALUE thread);
529
530void
531ruby_thread_init_stack(rb_thread_t *th)
532{
533 native_thread_init_stack(th);
534}
535
536const VALUE *
537rb_vm_proc_local_ep(VALUE proc)
538{
539 const VALUE *ep = vm_proc_ep(proc);
540
541 if (ep) {
542 return rb_vm_ep_local_ep(ep);
543 }
544 else {
545 return NULL;
546 }
547}
548
549// for ractor, defined in vm.c
550VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
551 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
552
553static VALUE
554thread_do_start_proc(rb_thread_t *th)
555{
556 VALUE args = th->invoke_arg.proc.args;
557 const VALUE *args_ptr;
558 int args_len;
559 VALUE procval = th->invoke_arg.proc.proc;
560 rb_proc_t *proc;
561 GetProcPtr(procval, proc);
562
563 th->ec->errinfo = Qnil;
564 th->ec->root_lep = rb_vm_proc_local_ep(procval);
565 th->ec->root_svar = Qfalse;
566
567 vm_check_ints_blocking(th->ec);
568
569 if (th->invoke_type == thread_invoke_type_ractor_proc) {
570 VALUE self = rb_ractor_self(th->ractor);
571 VM_ASSERT(FIXNUM_P(args));
572 args_len = FIX2INT(args);
573 args_ptr = ALLOCA_N(VALUE, args_len);
574 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
575 vm_check_ints_blocking(th->ec);
576
577 return rb_vm_invoke_proc_with_self(
578 th->ec, proc, self,
579 args_len, args_ptr,
580 th->invoke_arg.proc.kw_splat,
581 VM_BLOCK_HANDLER_NONE
582 );
583 }
584 else {
585 args_len = RARRAY_LENINT(args);
586 if (args_len < 8) {
587 /* free proc.args if the length is small enough */
588 args_ptr = ALLOCA_N(VALUE, args_len);
589 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
590 th->invoke_arg.proc.args = Qnil;
591 }
592 else {
593 args_ptr = RARRAY_CONST_PTR(args);
594 }
595
596 vm_check_ints_blocking(th->ec);
597
598 return rb_vm_invoke_proc(
599 th->ec, proc,
600 args_len, args_ptr,
601 th->invoke_arg.proc.kw_splat,
602 VM_BLOCK_HANDLER_NONE
603 );
604 }
605}
606
607static VALUE
608thread_do_start(rb_thread_t *th)
609{
610 native_set_thread_name(th);
611 VALUE result = Qundef;
612
613 switch (th->invoke_type) {
614 case thread_invoke_type_proc:
615 result = thread_do_start_proc(th);
616 break;
617
618 case thread_invoke_type_ractor_proc:
619 result = thread_do_start_proc(th);
620 rb_ractor_atexit(th->ec, result);
621 break;
622
623 case thread_invoke_type_func:
624 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
625 break;
626
627 case thread_invoke_type_none:
628 rb_bug("unreachable");
629 }
630
631 return result;
632}
633
634void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
635
636static int
637thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
638{
639 STACK_GROW_DIR_DETECTION;
640
641 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
642 VM_ASSERT(th != th->vm->ractor.main_thread);
643
644 enum ruby_tag_type state;
645 VALUE errinfo = Qnil;
646 rb_thread_t *ractor_main_th = th->ractor->threads.main;
647
648 // setup ractor
649 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
650 RB_VM_LOCK();
651 {
652 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
653 rb_ractor_t *r = th->ractor;
654 r->r_stdin = rb_io_prep_stdin();
655 r->r_stdout = rb_io_prep_stdout();
656 r->r_stderr = rb_io_prep_stderr();
657 }
658 RB_VM_UNLOCK();
659 }
660
661 // Ensure that we are not joinable.
662 VM_ASSERT(UNDEF_P(th->value));
663
664 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
665 VALUE result = Qundef;
666
667 EC_PUSH_TAG(th->ec);
668
669 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
670 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
671
672 SAVE_ROOT_JMPBUF(th, result = thread_do_start(th));
673 }
674
675 if (!fiber_scheduler_closed) {
676 fiber_scheduler_closed = 1;
677 rb_fiber_scheduler_set(Qnil);
678 }
679
680 if (!event_thread_end_hooked) {
681 event_thread_end_hooked = 1;
682 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
683 }
684
685 if (state == TAG_NONE) {
686 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
687 th->value = result;
688 } else {
689 errinfo = th->ec->errinfo;
690
691 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
692 if (!NIL_P(exc)) errinfo = exc;
693
694 if (state == TAG_FATAL) {
695 if (th->invoke_type == thread_invoke_type_ractor_proc) {
696 rb_ractor_atexit(th->ec, Qnil);
697 }
698 /* fatal error within this thread, need to stop whole script */
699 }
700 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
701 /* exit on main_thread. */
702 }
703 else {
704 if (th->report_on_exception) {
705 VALUE mesg = rb_thread_to_s(th->self);
706 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
707 rb_write_error_str(mesg);
708 rb_ec_error_print(th->ec, errinfo);
709 }
710
711 if (th->invoke_type == thread_invoke_type_ractor_proc) {
712 rb_ractor_atexit_exception(th->ec);
713 }
714
715 if (th->vm->thread_abort_on_exception ||
716 th->abort_on_exception || RTEST(ruby_debug)) {
717 /* exit on main_thread */
718 }
719 else {
720 errinfo = Qnil;
721 }
722 }
723 th->value = Qnil;
724 }
725
726 // The thread is effectively finished and can be joined.
727 VM_ASSERT(!UNDEF_P(th->value));
728
729 rb_threadptr_join_list_wakeup(th);
730 rb_threadptr_unlock_all_locking_mutexes(th);
731
732 if (th->invoke_type == thread_invoke_type_ractor_proc) {
733 rb_thread_terminate_all(th);
734 rb_ractor_teardown(th->ec);
735 }
736
737 th->status = THREAD_KILLED;
738 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
739
740 if (th->vm->ractor.main_thread == th) {
741 ruby_stop(0);
742 }
743
744 if (RB_TYPE_P(errinfo, T_OBJECT)) {
745 /* treat as a normal error object */
746 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
747 }
748
749 EC_POP_TAG();
750
751 rb_ec_clear_current_thread_trace_func(th->ec);
752
753 /* locking_mutex must be Qfalse */
754 if (th->locking_mutex != Qfalse) {
755 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
756 (void *)th, th->locking_mutex);
757 }
758
759 if (ractor_main_th->status == THREAD_KILLED &&
760 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
761 /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
762 rb_threadptr_interrupt(ractor_main_th);
763 }
764
765 rb_check_deadlock(th->ractor);
766
767 rb_fiber_close(th->ec->fiber_ptr);
768
769 thread_cleanup_func(th, FALSE);
770 VM_ASSERT(th->ec->vm_stack == NULL);
771
772 if (th->invoke_type == thread_invoke_type_ractor_proc) {
773 // After rb_ractor_living_threads_remove(),
774 // GC can happen at any time and this ractor can be collected (destroying the GVL).
775 // So gvl_release() must come before it.
776 thread_sched_to_dead(TH_SCHED(th), th);
777 rb_ractor_living_threads_remove(th->ractor, th);
778 }
779 else {
780 rb_ractor_living_threads_remove(th->ractor, th);
781 thread_sched_to_dead(TH_SCHED(th), th);
782 }
783
784 return 0;
785}
786
787struct thread_create_params {
788 enum thread_invoke_type type;
789
790 // for normal proc thread
791 VALUE args;
792 VALUE proc;
793
794 // for ractor
795 rb_ractor_t *g;
796
797 // for func
798 VALUE (*fn)(void *);
799};
800
801static void thread_specific_storage_alloc(rb_thread_t *th);
802
803static VALUE
804thread_create_core(VALUE thval, struct thread_create_params *params)
805{
806 rb_execution_context_t *ec = GET_EC();
807 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
808 int err;
809
810 thread_specific_storage_alloc(th);
811
812 if (OBJ_FROZEN(current_th->thgroup)) {
813 rb_raise(rb_eThreadError,
814 "can't start a new thread (frozen ThreadGroup)");
815 }
816
817 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
818
819 switch (params->type) {
820 case thread_invoke_type_proc:
821 th->invoke_type = thread_invoke_type_proc;
822 th->invoke_arg.proc.args = params->args;
823 th->invoke_arg.proc.proc = params->proc;
824 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
825 break;
826
827 case thread_invoke_type_ractor_proc:
828#if RACTOR_CHECK_MODE > 0
829 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
830#endif
831 th->invoke_type = thread_invoke_type_ractor_proc;
832 th->ractor = params->g;
833 th->ractor->threads.main = th;
834 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
835 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
836 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
837 rb_ractor_send_parameters(ec, params->g, params->args);
838 break;
839
840 case thread_invoke_type_func:
841 th->invoke_type = thread_invoke_type_func;
842 th->invoke_arg.func.func = params->fn;
843 th->invoke_arg.func.arg = (void *)params->args;
844 break;
845
846 default:
847 rb_bug("unreachable");
848 }
849
850 th->priority = current_th->priority;
851 th->thgroup = current_th->thgroup;
852
853 th->pending_interrupt_queue = rb_ary_hidden_new(0);
854 th->pending_interrupt_queue_checked = 0;
855 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
856 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
857
858 rb_native_mutex_initialize(&th->interrupt_lock);
859
860 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
861
862 rb_ractor_living_threads_insert(th->ractor, th);
863
864 /* kick thread */
865 err = native_thread_create(th);
866 if (err) {
867 th->status = THREAD_KILLED;
868 rb_ractor_living_threads_remove(th->ractor, th);
869 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
870 }
871 return thval;
872}
873
874#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
875
876/*
877 * call-seq:
878 * Thread.new { ... } -> thread
879 * Thread.new(*args, &proc) -> thread
880 * Thread.new(*args) { |args| ... } -> thread
881 *
882 * Creates a new thread executing the given block.
883 *
884 * Any +args+ given to ::new will be passed to the block:
885 *
886 * arr = []
887 * a, b, c = 1, 2, 3
888 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
889 * arr #=> [1, 2, 3]
890 *
891 * A ThreadError exception is raised if ::new is called without a block.
892 *
893 * If you're going to subclass Thread, be sure to call super in your
894 * +initialize+ method, otherwise a ThreadError will be raised.
895 */
896static VALUE
897thread_s_new(int argc, VALUE *argv, VALUE klass)
898{
899 rb_thread_t *th;
900 VALUE thread = rb_thread_alloc(klass);
901
902 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
903 rb_raise(rb_eThreadError, "can't alloc thread");
904 }
905
906 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
907 th = rb_thread_ptr(thread);
908 if (!threadptr_initialized(th)) {
909 rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
910 klass);
911 }
912 return thread;
913}
914
915/*
916 * call-seq:
917 * Thread.start([args]*) {|args| block } -> thread
918 * Thread.fork([args]*) {|args| block } -> thread
919 *
920 * Basically the same as ::new. However, if class Thread is subclassed, then
921 * calling +start+ in that subclass will not invoke the subclass's
922 * +initialize+ method.
923 */
924
925static VALUE
926thread_start(VALUE klass, VALUE args)
927{
928 struct thread_create_params params = {
929 .type = thread_invoke_type_proc,
930 .args = args,
931 .proc = rb_block_proc(),
932 };
933 return thread_create_core(rb_thread_alloc(klass), &params);
934}
935
936static VALUE
937threadptr_invoke_proc_location(rb_thread_t *th)
938{
939 if (th->invoke_type == thread_invoke_type_proc) {
940 return rb_proc_location(th->invoke_arg.proc.proc);
941 }
942 else {
943 return Qnil;
944 }
945}
946
947/* :nodoc: */
948static VALUE
949thread_initialize(VALUE thread, VALUE args)
950{
951 rb_thread_t *th = rb_thread_ptr(thread);
952
953 if (!rb_block_given_p()) {
954 rb_raise(rb_eThreadError, "must be called with a block");
955 }
956 else if (th->invoke_type != thread_invoke_type_none) {
957 VALUE loc = threadptr_invoke_proc_location(th);
958 if (!NIL_P(loc)) {
959 rb_raise(rb_eThreadError,
960 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
961 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
962 }
963 else {
964 rb_raise(rb_eThreadError, "already initialized thread");
965 }
966 }
967 else {
968 struct thread_create_params params = {
969 .type = thread_invoke_type_proc,
970 .args = args,
971 .proc = rb_block_proc(),
972 };
973 return thread_create_core(thread, &params);
974 }
975}
976
977VALUE
978rb_thread_create(VALUE (*fn)(void *), void *arg)
979{
980 struct thread_create_params params = {
981 .type = thread_invoke_type_func,
982 .fn = fn,
983 .args = (VALUE)arg,
984 };
985 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
986}
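
/*
 * Illustrative use of rb_thread_create() from C (a sketch;
 * my_thread_func is a hypothetical function, not part of this file):
 *
 *   static VALUE
 *   my_thread_func(void *arg)
 *   {
 *       // runs in a new Ruby thread, with the GVL held
 *       return INT2FIX(42);
 *   }
 *
 *   VALUE th = rb_thread_create(my_thread_func, NULL);
 *   VALUE result = rb_funcall(th, rb_intern("value"), 0); // => 42
 */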
987
988VALUE
989rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
990{
991 struct thread_create_params params = {
992 .type = thread_invoke_type_ractor_proc,
993 .g = r,
994 .args = args,
995 .proc = proc,
996 };
997 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
998}
999
1001struct join_arg {
1002 struct rb_waiting_list *waiter;
1003 rb_thread_t *target;
1004 VALUE timeout;
1005 rb_hrtime_t *limit;
1006};
1007
1008static VALUE
1009remove_from_join_list(VALUE arg)
1010{
1011 struct join_arg *p = (struct join_arg *)arg;
1012 rb_thread_t *target_thread = p->target;
1013
1014 if (target_thread->status != THREAD_KILLED) {
1015 struct rb_waiting_list **join_list = &target_thread->join_list;
1016
1017 while (*join_list) {
1018 if (*join_list == p->waiter) {
1019 *join_list = (*join_list)->next;
1020 break;
1021 }
1022
1023 join_list = &(*join_list)->next;
1024 }
1025 }
1026
1027 return Qnil;
1028}
1029
1030static int
1031thread_finished(rb_thread_t *th)
1032{
1033 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1034}
1035
1036static VALUE
1037thread_join_sleep(VALUE arg)
1038{
1039 struct join_arg *p = (struct join_arg *)arg;
1040 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1041 rb_hrtime_t end = 0, *limit = p->limit;
1042
1043 if (limit) {
1044 end = rb_hrtime_add(*limit, rb_hrtime_now());
1045 }
1046
1047 while (!thread_finished(target_th)) {
1048 VALUE scheduler = rb_fiber_scheduler_current();
1049
1050 if (scheduler != Qnil) {
1051 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1052 // Check if the target thread is finished after blocking:
1053 if (thread_finished(target_th)) break;
1054 // Otherwise, a timeout occurred:
1055 else return Qfalse;
1056 }
1057 else if (!limit) {
1058 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1059 }
1060 else {
1061 if (hrtime_update_expire(limit, end)) {
1062 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1063 return Qfalse;
1064 }
1065 th->status = THREAD_STOPPED;
1066 native_sleep(th, limit);
1067 }
1068 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1069 th->status = THREAD_RUNNABLE;
1070
1071 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1072 }
1073
1074 return Qtrue;
1075}
1076
1077static VALUE
1078thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1079{
1080 rb_execution_context_t *ec = GET_EC();
1081 rb_thread_t *th = ec->thread_ptr;
1082 rb_fiber_t *fiber = ec->fiber_ptr;
1083
1084 if (th == target_th) {
1085 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1086 }
1087
1088 if (th->ractor->threads.main == target_th) {
1089 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1090 }
1091
1092 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1093
1094 if (target_th->status != THREAD_KILLED) {
1095 struct rb_waiting_list waiter;
1096 waiter.next = target_th->join_list;
1097 waiter.thread = th;
1098 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1099 target_th->join_list = &waiter;
1100
1101 struct join_arg arg;
1102 arg.waiter = &waiter;
1103 arg.target = target_th;
1104 arg.timeout = timeout;
1105 arg.limit = limit;
1106
1107 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1108 return Qnil;
1109 }
1110 }
1111
1112 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1113
1114 if (target_th->ec->errinfo != Qnil) {
1115 VALUE err = target_th->ec->errinfo;
1116
1117 if (FIXNUM_P(err)) {
1118 switch (err) {
1119 case INT2FIX(TAG_FATAL):
1120 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1121
1122 /* OK. killed. */
1123 break;
1124 default:
1125 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1126 }
1127 }
1128 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1129 rb_bug("thread_join: THROW_DATA should not reach here.");
1130 }
1131 else {
1132 /* normal exception */
1133 rb_exc_raise(err);
1134 }
1135 }
1136 return target_th->self;
1137}
1138
1139/*
1140 * call-seq:
1141 * thr.join -> thr
1142 * thr.join(limit) -> thr
1143 *
1144 * The calling thread will suspend execution and run this +thr+.
1145 *
1146 * Does not return until +thr+ exits or until the given +limit+ seconds have
1147 * passed.
1148 *
1149 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1150 * returned.
1151 *
1152 * Any threads not joined will be killed when the main program exits.
1153 *
1154 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1155 * $DEBUG flags are not set (so the exception has not yet been processed), it
1156 * will be processed at this time.
1157 *
1158 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1159 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1160 * x.join # Let thread x finish, thread a will be killed on exit.
1161 * #=> "axyz"
1162 *
1163 * The following example illustrates the +limit+ parameter.
1164 *
1165 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1166 * puts "Waiting" until y.join(0.15)
1167 *
1168 * This will produce:
1169 *
1170 * tick...
1171 * Waiting
1172 * tick...
1173 * Waiting
1174 * tick...
1175 * tick...
1176 */
1177
1178static VALUE
1179thread_join_m(int argc, VALUE *argv, VALUE self)
1180{
1181 VALUE timeout = Qnil;
1182 rb_hrtime_t rel = 0, *limit = 0;
1183
1184 if (rb_check_arity(argc, 0, 1)) {
1185 timeout = argv[0];
1186 }
1187
1188 // Convert the timeout eagerly, so it's always converted and deterministic
1189 /*
1190 * This supports INFINITY and negative values, so we can't use
1191 * rb_time_interval right now...
1192 */
1193 if (NIL_P(timeout)) {
1194 /* unlimited */
1195 }
1196 else if (FIXNUM_P(timeout)) {
1197 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1198 limit = &rel;
1199 }
1200 else {
1201 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1202 }
1203
1204 return thread_join(rb_thread_ptr(self), timeout, limit);
1205}
1206
1207/*
1208 * call-seq:
1209 * thr.value -> obj
1210 *
1211 * Waits for +thr+ to complete, using #join, and returns its value or raises
1212 * the exception which terminated the thread.
1213 *
1214 * a = Thread.new { 2 + 2 }
1215 * a.value #=> 4
1216 *
1217 * b = Thread.new { raise 'something went wrong' }
1218 * b.value #=> RuntimeError: something went wrong
1219 */
1220
1221static VALUE
1222thread_value(VALUE self)
1223{
1224 rb_thread_t *th = rb_thread_ptr(self);
1225 thread_join(th, Qnil, 0);
1226 if (UNDEF_P(th->value)) {
1227 // If the thread is dead because we forked, th->value is still Qundef.
1228 return Qnil;
1229 }
1230 return th->value;
1231}
1232
1233/*
1234 * Thread Scheduling
1235 */
1236
1237static void
1238getclockofday(struct timespec *ts)
1239{
1240#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1241 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1242 return;
1243#endif
1244 rb_timespec_now(ts);
1245}
1246
1247/*
1248 * Don't inline this, since the library call is already time-consuming
1249 * and we don't want "struct timespec" on the stack too long for GC
1250 */
1251NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1252rb_hrtime_t
1253rb_hrtime_now(void)
1254{
1255 struct timespec ts;
1256
1257 getclockofday(&ts);
1258 return rb_timespec2hrtime(&ts);
1259}
1260
1261/*
1262 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1263 * being uninitialized; maybe other versions, too.
1264 */
1265COMPILER_WARNING_PUSH
1266#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1267COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1268#endif
1269#ifndef PRIu64
1270#define PRIu64 PRI_64_PREFIX "u"
1271#endif
1272/*
1273 * @end is the absolute time when @timeout is set to expire.
1274 * Returns true if @end has passed;
1275 * updates @timeout and returns false otherwise.
1276 */
1277static int
1278hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1279{
1280 rb_hrtime_t now = rb_hrtime_now();
1281
1282 if (now > end) return 1;
1283
1284 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1285
1286 *timeout = end - now;
1287 return 0;
1288}
1289COMPILER_WARNING_POP
1290
1291static int
1292sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1293{
1294 enum rb_thread_status prev_status = th->status;
1295 int woke;
1296 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1297
1298 th->status = THREAD_STOPPED;
1299 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1300 while (th->status == THREAD_STOPPED) {
1301 native_sleep(th, &rel);
1302 woke = vm_check_ints_blocking(th->ec);
1303 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1304 break;
1305 if (hrtime_update_expire(&rel, end))
1306 break;
1307 woke = 1;
1308 }
1309 th->status = prev_status;
1310 return woke;
1311}
1312
1313static int
1314sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1315{
1316 enum rb_thread_status prev_status = th->status;
1317 int woke;
1318 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1319
1320 th->status = THREAD_STOPPED;
1321 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1322 while (th->status == THREAD_STOPPED) {
1323 native_sleep(th, &rel);
1324 woke = vm_check_ints_blocking(th->ec);
1325 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1326 break;
1327 if (hrtime_update_expire(&rel, end))
1328 break;
1329 woke = 1;
1330 }
1331 th->status = prev_status;
1332 return woke;
1333}
1334
1335static void
1336sleep_forever(rb_thread_t *th, unsigned int fl)
1337{
1338 enum rb_thread_status prev_status = th->status;
1339 enum rb_thread_status status;
1340 int woke;
1341
1342 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1343 th->status = status;
1344
1345 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1346
1347 while (th->status == status) {
1348 if (fl & SLEEP_DEADLOCKABLE) {
1349 rb_ractor_sleeper_threads_inc(th->ractor);
1350 rb_check_deadlock(th->ractor);
1351 }
1352 {
1353 native_sleep(th, 0);
1354 }
1355 if (fl & SLEEP_DEADLOCKABLE) {
1356 rb_ractor_sleeper_threads_dec(th->ractor);
1357 }
1358 if (fl & SLEEP_ALLOW_SPURIOUS) {
1359 break;
1360 }
1361
1362 woke = vm_check_ints_blocking(th->ec);
1363
1364 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1365 break;
1366 }
1367 }
1368 th->status = prev_status;
1369}
1370
1371void
1372rb_thread_sleep_forever(void)
1373{
1374 RUBY_DEBUG_LOG("forever");
1375 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1376}
1377
1378void
1379rb_thread_sleep_deadly(void)
1380{
1381 RUBY_DEBUG_LOG("deadly");
1382 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1383}
1384
1385static void
1386rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1387{
1388 VALUE scheduler = rb_fiber_scheduler_current();
1389 if (scheduler != Qnil) {
1390 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1391 }
1392 else {
1393 RUBY_DEBUG_LOG("...");
1394 if (end) {
1395 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1396 }
1397 else {
1398 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1399 }
1400 }
1401}
1402
1403void
1404rb_thread_wait_for(struct timeval time)
1405{
1406 rb_thread_t *th = GET_THREAD();
1407
1408 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1409}
1410
1411/*
1412 * CAUTION: This function causes thread switching.
1413 * rb_thread_check_ints() checks Ruby's pending interrupts;
1414 * some interrupts need thread switching, invoking handlers,
1415 * and so on.
1416 */
1417
1418void
1419rb_thread_check_ints(void)
1420{
1421 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1422}
1423
1424/*
1425 * Hidden API for tcl/tk wrapper.
1426 * There is no guarantee that it will be kept.
1427 */
1428int
1429rb_thread_check_trap_pending(void)
1430{
1431 return rb_signal_buff_size() != 0;
1432}
1433
1434/* This function can be called in a blocking region. */
1435int
1436rb_thread_interrupted(VALUE thval)
1437{
1438 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1439}
1440
1441void
1442rb_thread_sleep(int sec)
1443{
1444 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1445}
1446
1447static void
1448rb_thread_schedule_limits(uint32_t limits_us)
1449{
1450 if (!rb_thread_alone()) {
1451 rb_thread_t *th = GET_THREAD();
1452 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1453
1454 if (th->running_time_us >= limits_us) {
1455 RUBY_DEBUG_LOG("switch %s", "start");
1456
1457 RB_VM_SAVE_MACHINE_CONTEXT(th);
1458 thread_sched_yield(TH_SCHED(th), th);
1459 rb_ractor_thread_switch(th->ractor, th);
1460
1461 RUBY_DEBUG_LOG("switch %s", "done");
1462 }
1463 }
1464}
1465
1466void
1467rb_thread_schedule(void)
1468{
1469 rb_thread_schedule_limits(0);
1470 RUBY_VM_CHECK_INTS(GET_EC());
1471}
1472
1473/* blocking region */
1474
1475static inline int
1476blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1477 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1478{
1479#ifdef RUBY_VM_CRITICAL_SECTION
1480 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1481#endif
1482 VM_ASSERT(th == GET_THREAD());
1483
1484 region->prev_status = th->status;
1485 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1486 th->blocking_region_buffer = region;
1487 th->status = THREAD_STOPPED;
1488 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1489
1490 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1491 return TRUE;
1492 }
1493 else {
1494 return FALSE;
1495 }
1496}
1497
1498static inline void
1499blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1500{
1501 /* entry to ubf_list still permitted at this point, make it impossible: */
1502 unblock_function_clear(th);
1503 /* entry to ubf_list impossible at this point, so unregister is safe: */
1504 unregister_ubf_list(th);
1505
1506 thread_sched_to_running(TH_SCHED(th), th);
1507 rb_ractor_thread_switch(th->ractor, th);
1508
1509 th->blocking_region_buffer = 0;
1510 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1511 if (th->status == THREAD_STOPPED) {
1512 th->status = region->prev_status;
1513 }
1514
1515 RUBY_DEBUG_LOG("end");
1516
1517#ifndef _WIN32
1518 // GET_THREAD() clears WSAGetLastError()
1519 VM_ASSERT(th == GET_THREAD());
1520#endif
1521}
1522
1523void *
1524rb_nogvl(void *(*func)(void *), void *data1,
1525 rb_unblock_function_t *ubf, void *data2,
1526 int flags)
1527{
1528 void *val = 0;
1529 rb_execution_context_t *ec = GET_EC();
1530 rb_thread_t *th = rb_ec_thread_ptr(ec);
1531 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1532 bool is_main_thread = vm->ractor.main_thread == th;
1533 int saved_errno = 0;
1534 VALUE ubf_th = Qfalse;
1535
1536 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1537 ubf = ubf_select;
1538 data2 = th;
1539 }
1540 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1541 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1542 vm->ubf_async_safe = 1;
1543 }
1544 }
1545
1546 BLOCKING_REGION(th, {
1547 val = func(data1);
1548 saved_errno = rb_errno();
1549 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1550
1551 if (is_main_thread) vm->ubf_async_safe = 0;
1552
1553 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1554 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1555 }
1556
1557 if (ubf_th != Qfalse) {
1558 thread_value(rb_thread_kill(ubf_th));
1559 }
1560
1561 rb_errno_set(saved_errno);
1562
1563 return val;
1564}
1565
1566/*
1567 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1568 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1569 * without interrupt processing.
1570 *
1571 * rb_thread_call_without_gvl() does:
1572 * (1) Check interrupts.
1573 * (2) release GVL.
1574 * Other Ruby threads may run in parallel.
1575 * (3) call func with data1
1576 * (4) acquire GVL.
1577 * Other Ruby threads cannot run in parallel anymore.
1578 * (5) Check interrupts.
1579 *
1580 * rb_thread_call_without_gvl2() does:
1581 * (1) Check interrupt and return if interrupted.
1582 * (2) release GVL.
1583 * (3) call func with data1 and a pointer to the flags.
1584 * (4) acquire GVL.
1585 *
1586 * If another thread interrupts this thread (Thread#kill, signal delivery,
1587 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1588 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1589 * toggling a cancellation flag, canceling the invocation of a call inside
1590 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1591 *
1592 * There are built-in ubfs and you can specify these ubfs:
1593 *
1594 * * RUBY_UBF_IO: ubf for IO operation
1595 * * RUBY_UBF_PROCESS: ubf for process operation
1596 *
1597 * However, we cannot guarantee that our built-in ubfs interrupt your
1598 * `func()' correctly. Be careful when using rb_thread_call_without_gvl().
1599 * If you don't provide a proper ubf(), your program will not stop for
1600 * Control+C or other shutdown events.
1601 *
1602 * "Check interrupts" on above list means checking asynchronous
1603 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1604 * request, and so on) and calling corresponding procedures
1605 * (such as `trap' for signals, raise an exception for Thread#raise).
1606 * If `func()' finished and received interrupts, you may skip interrupt
1607 * checking. For example, assume the following func() it reads data from file.
1608 *
1609 * read_func(...) {
1610 * // (a) before read
1611 * read(buffer); // (b) reading
1612 * // (c) after read
1613 * }
1614 *
1615 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1616 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1617 * at (c), after the *read* operation is completed, checking interrupts is
1618 * harmful because it causes an irrevocable side effect: the read data will
1619 * vanish. To avoid such a problem, `read_func()' should be used with
1620 * `rb_thread_call_without_gvl2()'.
1621 *
1622 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1623 * immediately. This function does not indicate when execution was interrupted:
1624 * there are 4 possible timings, (a), (b), (c), and before calling
1625 * read_func(). You need to record the progress of read_func() and check
1626 * it after `rb_thread_call_without_gvl2()' returns. You may also need to
1627 * call `rb_thread_check_ints()' at appropriate points, or your program
1628 * cannot properly handle events such as `trap' and so on.
1629 *
1630 * NOTE: You cannot use most of the Ruby C API or touch Ruby
1631 * objects in `func()' and `ubf()', including raising an
1632 * exception, because the current thread does not hold the GVL
1633 * (doing so causes synchronization problems). If you need to
1634 * call Ruby functions, either use rb_thread_call_with_gvl()
1635 * or read the source code of the C APIs and confirm their safety
1636 * yourself.
1637 *
1638 * NOTE: In short, this API is difficult to use safely. We recommend
1639 * other approaches where possible; we lack experience using this API.
1640 * Please report any problems related to it.
1641 *
1642 * NOTE: Releasing and re-acquiring the GVL can be expensive for a
1643 * short-running `func()'. Be sure to benchmark, and use this
1644 * mechanism only when `func()' consumes enough time.
1645 *
1646 * Safe C API:
1647 * * rb_thread_interrupted() - check interrupt flag
1648 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1649 * they will work without GVL, and may acquire GVL when GC is needed.
1650 */
1651void *
1652rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1653 rb_unblock_function_t *ubf, void *data2)
1654{
1655 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1656}
1657
1658void *
1659rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1660 rb_unblock_function_t *ubf, void *data2)
1661{
1662 return rb_nogvl(func, data1, ubf, data2, 0);
1663}
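
/*
 * Illustrative use of rb_thread_call_without_gvl() (a sketch; the
 * struct and function names are hypothetical):
 *
 *   struct blocking_args { int fd; volatile int canceled; };
 *
 *   static void *
 *   blocking_read(void *p)  // runs WITHOUT the GVL; no Ruby API here
 *   {
 *       struct blocking_args *args = p;
 *       char buf[4096];
 *       while (!args->canceled &&
 *              read(args->fd, buf, sizeof(buf)) < 0 && errno == EINTR)
 *           ; // retry on EINTR until canceled
 *       return NULL;
 *   }
 *
 *   static void
 *   cancel_read(void *p)    // the ubf; may run on another native thread
 *   {
 *       ((struct blocking_args *)p)->canceled = 1;
 *   }
 *
 *   // from code holding the GVL:
 *   struct blocking_args args = { fd, 0 };
 *   rb_thread_call_without_gvl(blocking_read, &args, cancel_read, &args);
 */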
1664
1665static int
1666waitfd_to_waiting_flag(int wfd_event)
1667{
1668 return wfd_event << 1;
1669}
1670
1671static void
1672thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
1673{
1674 wfd->fd = fd;
1675 wfd->th = th;
1676 wfd->busy = NULL;
1677
1678 RB_VM_LOCK_ENTER();
1679 {
1680 ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
1681 }
1682 RB_VM_LOCK_LEAVE();
1683}
1684
1685static void
1686thread_io_wake_pending_closer(struct waiting_fd *wfd)
1687{
1688 bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
1689 if (has_waiter) {
1690 rb_mutex_lock(wfd->busy->wakeup_mutex);
1691 }
1692
1693 /* Needs to be protected with RB_VM_LOCK because we don't know if
1694 wfd is on the global list of pending FD ops or if it's on a
1695 struct rb_io_close_wait_list close-waiter. */
1696 RB_VM_LOCK_ENTER();
1697 ccan_list_del(&wfd->wfd_node);
1698 RB_VM_LOCK_LEAVE();
1699
1700 if (has_waiter) {
1701 rb_thread_wakeup(wfd->busy->closing_thread);
1702 rb_mutex_unlock(wfd->busy->wakeup_mutex);
1703 }
1704}
1705
1706static int
1707thread_io_wait_events(rb_thread_t *th, rb_execution_context_t *ec, int fd, int events, struct timeval *timeout, struct waiting_fd *wfd)
1708{
1709#if defined(USE_MN_THREADS) && USE_MN_THREADS
1710 if (!th_has_dedicated_nt(th) &&
1711 (events || timeout) &&
1712 th->blocking // no fiber scheduler
1713 ) {
1714 int r;
1715 rb_hrtime_t rel, *prel;
1716
1717 if (timeout) {
1718 rel = rb_timeval2hrtime(timeout);
1719 prel = &rel;
1720 }
1721 else {
1722 prel = NULL;
1723 }
1724
1725 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1726
1727 thread_io_setup_wfd(th, fd, wfd);
1728 {
1729 // wait readable/writable
1730 r = thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel);
1731 }
1732 thread_io_wake_pending_closer(wfd);
1733
1734 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1735
1736 return r;
1737 }
1738#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1739
1740 return 0;
1741}
1742
1743VALUE
1744rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
1745{
1746 rb_execution_context_t * volatile ec = GET_EC();
1747 rb_thread_t *th = rb_ec_thread_ptr(ec);
1748
1749 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);
1750
1751 struct waiting_fd waiting_fd;
1752
1753 thread_io_wait_events(th, ec, fd, events, NULL, &waiting_fd);
1754
1755 volatile VALUE val = Qundef; /* shouldn't be used */
1756 volatile int saved_errno = 0;
1757 enum ruby_tag_type state;
1758
1759 // `errno` is only valid when there is an actual error - but we can't
1760 // extract that from the return value of `func` alone, so we clear any
1761 // prior `errno` value here so that we can later check if it was set by
1762 // `func` or not (as opposed to some previously set value).
1763 errno = 0;
1764
1765 thread_io_setup_wfd(th, fd, &waiting_fd);
1766
1767 EC_PUSH_TAG(ec);
1768 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1769 BLOCKING_REGION(waiting_fd.th, {
1770 val = func(data1);
1771 saved_errno = errno;
1772 }, ubf_select, waiting_fd.th, FALSE);
1773 }
1774 EC_POP_TAG();
1775
1776 /*
1777 * must be deleted before the jump;
1778 * this will delete it either from waiting_fds or from an on-stack struct rb_io_close_wait_list
1779 */
1780 thread_io_wake_pending_closer(&waiting_fd);
1781
1782 if (state) {
1783 EC_JUMP_TAG(ec, state);
1784 }
1785 /* TODO: check func() */
1786 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1787
1788 // If the error was a timeout, we raise a specific exception for that:
1789 if (saved_errno == ETIMEDOUT) {
1790 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1791 }
1792
1793 errno = saved_errno;
1794
1795 return val;
1796}
1797
1798VALUE
1799rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1800{
1801 return rb_thread_io_blocking_call(func, data1, fd, 0);
1802}
1803
1804/*
1805 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1806 *
1807 * After releasing the GVL using
1808 * rb_thread_call_without_gvl(), you cannot access Ruby values or invoke
1809 * methods. If you need to access Ruby, you must use this function,
1810 * rb_thread_call_with_gvl().
1811 *
1812 * This function rb_thread_call_with_gvl() does:
1813 * (1) acquire GVL.
1814 * (2) call passed function `func'.
1815 * (3) release GVL.
1816 * (4) return a value which is returned at (2).
1817 *
1818 * NOTE: You should not return a Ruby object at (2) because such an
1819 * object will not be marked.
1820 *
1821 * NOTE: If an exception is raised in `func', this function DOES NOT
1822 * protect (catch) the exception. If you have any resources
1823 * which should be freed before throwing the exception, you need to use
1824 * rb_protect() in `func' and return a value which indicates that an
1825 * exception was raised.
1826 *
1827 * NOTE: This function should not be called by a thread which was not
1828 * created as a Ruby thread (by Thread.new or the like). In other
1829 * words, this function *DOES NOT* associate or convert a non-Ruby
1830 * thread to a Ruby thread.
1831 */
1832void *
1833rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1834{
1835 rb_thread_t *th = ruby_thread_from_native();
1836 struct rb_blocking_region_buffer *brb;
1837 struct rb_unblock_callback prev_unblock;
1838 void *r;
1839
1840 if (th == 0) {
1841 /* Error has occurred, but we can't use rb_bug()
1842 * because this thread is not Ruby's thread.
1843 * What should we do?
1844 */
1845 bp();
1846 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1847 exit(EXIT_FAILURE);
1848 }
1849
1850 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1851 prev_unblock = th->unblock;
1852
1853 if (brb == 0) {
1854 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1855 }
1856
1857 blocking_region_end(th, brb);
1858 /* enter the Ruby world: you can access Ruby values, methods and so on. */
1859 r = (*func)(data1);
1860 /* leave the Ruby world: you can no longer access Ruby values, etc. */
1861 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1862 RUBY_ASSERT_ALWAYS(released);
1863 RB_VM_SAVE_MACHINE_CONTEXT(th);
1864 thread_sched_to_waiting(TH_SCHED(th), th);
1865 return r;
1866}
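
/*
 * Illustrative pairing with rb_thread_call_without_gvl() (a sketch;
 * heavy_work(), compute_*() and log_progress() are hypothetical):
 *
 *   static void *
 *   log_progress(void *msg)  // runs WITH the GVL; Ruby API is allowed
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                  rb_str_new_cstr(msg));
 *       return NULL;
 *   }
 *
 *   static void *
 *   heavy_work(void *arg)    // runs WITHOUT the GVL
 *   {
 *       compute_something(arg);          // no Ruby API here
 *       rb_thread_call_with_gvl(log_progress, (void *)"half done");
 *       compute_something_else(arg);
 *       return NULL;
 *   }
 */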
1867
1868/*
1869 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1870 *
1871 ***
1872 *** This API is EXPERIMENTAL!
1873 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1874 ***
1875 */
1876
1877int
1878ruby_thread_has_gvl_p(void)
1879{
1880 rb_thread_t *th = ruby_thread_from_native();
1881
1882 if (th && th->blocking_region_buffer == 0) {
1883 return 1;
1884 }
1885 else {
1886 return 0;
1887 }
1888}
1889
1890/*
1891 * call-seq:
1892 * Thread.pass -> nil
1893 *
1894 * Give the thread scheduler a hint to pass execution to another thread.
1895 * A running thread may or may not switch; it depends on the OS and processor.
1896 */
1897
1898static VALUE
1899thread_s_pass(VALUE klass)
1900{
1901 rb_thread_schedule();
1902 return Qnil;
1903}
1904
1905/*****************************************************/
1906
1907/*
1908 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1909 *
1910 * Async events such as an exception thrown by Thread#raise,
1911 * Thread#kill and thread termination (after main thread termination)
1912 * will be queued to th->pending_interrupt_queue.
1913 * - clear: clear the queue.
1914 * - enque: enqueue err object into queue.
1915 * - deque: dequeue err object from queue.
1916 * - active_p: return 1 if the queue should be checked.
1917 *
1918 * All rb_threadptr_pending_interrupt_* functions are, of course, called
1919 * by a thread that holds the GVL.
1920 * Note that all "rb_"-prefixed APIs need the GVL to be called.
1921 */
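
/*
 * Typical producer-side pairing (a sketch; see terminate_all() above
 * for a real example): enqueue the error object, then interrupt the
 * target so it wakes up and drains its queue.
 *
 *   rb_threadptr_pending_interrupt_enque(th, err_object);
 *   rb_threadptr_interrupt(th);
 */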
1922
1923void
1924rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1925{
1926 rb_ary_clear(th->pending_interrupt_queue);
1927}
1928
1929void
1930rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1931{
1932 rb_ary_push(th->pending_interrupt_queue, v);
1933 th->pending_interrupt_queue_checked = 0;
1934}
1935
1936static void
1937threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1938{
1939 if (!th->pending_interrupt_queue) {
1940 rb_raise(rb_eThreadError, "uninitialized thread");
1941 }
1942}
1943
1944enum handle_interrupt_timing {
1945 INTERRUPT_NONE,
1946 INTERRUPT_IMMEDIATE,
1947 INTERRUPT_ON_BLOCKING,
1948 INTERRUPT_NEVER
1949};
1950
1951static enum handle_interrupt_timing
1952rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
1953{
1954 if (sym == sym_immediate) {
1955 return INTERRUPT_IMMEDIATE;
1956 }
1957 else if (sym == sym_on_blocking) {
1958 return INTERRUPT_ON_BLOCKING;
1959 }
1960 else if (sym == sym_never) {
1961 return INTERRUPT_NEVER;
1962 }
1963 else {
1964 rb_raise(rb_eThreadError, "unknown mask signature");
1965 }
1966}
1967
1968static enum handle_interrupt_timing
1969rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1970{
1971 VALUE mask;
1972 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1973 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1974 VALUE mod;
1975 long i;
1976
1977 for (i=0; i<mask_stack_len; i++) {
1978 mask = mask_stack[mask_stack_len-(i+1)];
1979
1980 if (SYMBOL_P(mask)) {
1981 /* do not match RUBY_FATAL_THREAD_KILLED etc */
1982 if (err != rb_cInteger) {
1983 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
1984 }
1985 else {
1986 continue;
1987 }
1988 }
1989
1990 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1991 VALUE klass = mod;
1992 VALUE sym;
1993
1994 if (BUILTIN_TYPE(mod) == T_ICLASS) {
1995 klass = RBASIC(mod)->klass;
1996 }
1997 else if (mod != RCLASS_ORIGIN(mod)) {
1998 continue;
1999 }
2000
2001 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2002 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2003 }
2004 }
2005 /* try the next mask */
2006 }
2007 return INTERRUPT_NONE;
2008}
2009
2010static int
2011rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2012{
2013 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2014}
2015
2016static int
2017rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2018{
2019 int i;
2020 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2021 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2022 if (rb_obj_is_kind_of(e, err)) {
2023 return TRUE;
2024 }
2025 }
2026 return FALSE;
2027}
2028
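/* Dequeue the first queued error whose mask timing permits delivery now:
 * :on_blocking entries are skipped unless we are at a blocking point, and
 * :never entries stay queued for later. */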
2029static VALUE
2030rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2031{
2032#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2033 int i;
2034
2035 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2036 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2037
2038 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2039
2040 switch (mask_timing) {
2041 case INTERRUPT_ON_BLOCKING:
2042 if (timing != INTERRUPT_ON_BLOCKING) {
2043 break;
2044 }
2045 /* fall through */
2046 case INTERRUPT_NONE: /* default: IMMEDIATE */
2047 case INTERRUPT_IMMEDIATE:
2048 rb_ary_delete_at(th->pending_interrupt_queue, i);
2049 return err;
2050 case INTERRUPT_NEVER:
2051 break;
2052 }
2053 }
2054
2055 th->pending_interrupt_queue_checked = 1;
2056 return Qundef;
2057#else
2058 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2059 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2060 th->pending_interrupt_queue_checked = 1;
2061 }
2062 return err;
2063#endif
2064}
2065
2066static int
2067threadptr_pending_interrupt_active_p(rb_thread_t *th)
2068{
2069 /*
2070 * As an optimization, we don't check the async errinfo queue
2071 * if neither the queue nor the thread interrupt mask has changed
2072 * since the last check.
2073 */
2074 if (th->pending_interrupt_queue_checked) {
2075 return 0;
2076 }
2077
2078 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2079 return 0;
2080 }
2081
2082 return 1;
2083}
2084
2085static int
2086handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2087{
2088 VALUE *maskp = (VALUE *)args;
2089
2090 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2091 rb_raise(rb_eArgError, "unknown mask signature");
2092 }
2093
2094 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2095 *maskp = val;
2096 return ST_CONTINUE;
2097 }
2098
2099 if (RTEST(*maskp)) {
2100 if (!RB_TYPE_P(*maskp, T_HASH)) {
2101 VALUE prev = *maskp;
2102 *maskp = rb_ident_hash_new();
2103 if (SYMBOL_P(prev)) {
2104 rb_hash_aset(*maskp, rb_eException, prev);
2105 }
2106 }
2107 rb_hash_aset(*maskp, key, val);
2108 }
2109 else {
2110 *maskp = Qfalse;
2111 }
2112
2113 return ST_CONTINUE;
2114}
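/*
 * Mask coalescing, summarizing the callback above: a hash whose only pair
 * is <code>Exception => sym</code> collapses to the bare timing symbol,
 * while any other combination is validated and copied into a fresh
 * identity hash keyed by exception class.
 */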
2115
2116/*
2117 * call-seq:
2118 * Thread.handle_interrupt(hash) { ... } -> result of the block
2119 *
2120 * Changes asynchronous interrupt timing.
2121 *
2122 * _interrupt_ means an asynchronous event and its corresponding procedure,
2123 * invoked by Thread#raise, Thread#kill, signal trap (not supported yet),
2124 * or main thread termination (if the main thread terminates, then all
2125 * other threads will be killed).
2126 *
2127 * The given +hash+ has pairs like <code>ExceptionClass =>
2128 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2129 * the given block, and TimingSymbol can be one of the following symbols:
2130 *
2131 * [+:immediate+] Invoke interrupts immediately.
2132 * [+:on_blocking+] Invoke interrupts during a _BlockingOperation_.
2133 * [+:never+] Never invoke interrupts.
2134 *
2135 * _BlockingOperation_ means an operation that will block the calling thread,
2136 * such as read and write. In the CRuby implementation, _BlockingOperation_ is any
2137 * operation executed without the GVL.
2138 *
2139 * Masked asynchronous interrupts are delayed until they are enabled.
2140 * This method is similar to sigprocmask(3).
2141 *
2142 * === NOTE
2143 *
2144 * Asynchronous interrupts are difficult to use.
2145 *
2146 * If you need to communicate between threads, consider using another mechanism such as Queue.
2147 *
2148 * Or use asynchronous interrupts only with a deep understanding of this method.
2149 *
2150 * === Usage
2151 *
2152 * In this example, we can guard against Thread#raise exceptions.
2153 *
2154 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2155 * ignored in the first block of the main thread. In the second
2156 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2157 *
2158 * th = Thread.new do
2159 * Thread.handle_interrupt(RuntimeError => :never) {
2160 * begin
2161 * # You can write resource allocation code safely.
2162 * Thread.handle_interrupt(RuntimeError => :immediate) {
2163 * # ...
2164 * }
2165 * ensure
2166 * # You can write resource deallocation code safely.
2167 * end
2168 * }
2169 * end
2170 * Thread.pass
2171 * # ...
2172 * th.raise "stop"
2173 *
2174 * While we are ignoring the RuntimeError exception, it's safe to write our
2175 * resource allocation code. Then, the ensure block is where we can safely
2176 * deallocate our resources.
2177 *
2178 * ==== Guarding from Timeout::Error
2179 *
2180 * In the next example, we will guard against the Timeout::Error exception. This
2181 * helps prevent resource leaks when Timeout::Error exceptions occur
2182 * during a normal ensure clause. For this example we use the
2183 * standard library Timeout, from lib/timeout.rb
2184 *
2185 * require 'timeout'
2186 * Thread.handle_interrupt(Timeout::Error => :never) {
2187 * Timeout.timeout(10) {
2188 * # Timeout::Error doesn't occur here
2189 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2190 * # possible to be killed by Timeout::Error
2191 * # while blocking operation
2192 * }
2193 * # Timeout::Error doesn't occur here
2194 * }
2195 * }
2196 *
2197 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2198 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2199 * operation that will block the calling thread is susceptible to a
2200 * Timeout::Error exception being raised.
2201 *
2202 * ==== Stack control settings
2203 *
2204 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2205 * to control more than one ExceptionClass and TimingSymbol at a time.
2206 *
2207 * Thread.handle_interrupt(FooError => :never) {
2208 * Thread.handle_interrupt(BarError => :never) {
2209 * # FooError and BarError are prohibited.
2210 * }
2211 * }
2212 *
2213 * ==== Inheritance with ExceptionClass
2214 *
2215 * All exceptions inherited from the ExceptionClass parameter will be considered.
2216 *
2217 * Thread.handle_interrupt(Exception => :never) {
2218 * # all exceptions inherited from Exception are prohibited.
2219 * }
2220 *
2221 * For handling all interrupts, use +Object+ and not +Exception+
2222 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2223 */
2224static VALUE
2225rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2226{
2227 VALUE mask = Qundef;
2228 rb_execution_context_t * volatile ec = GET_EC();
2229 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2230 volatile VALUE r = Qnil;
2231 enum ruby_tag_type state;
2232
2233 if (!rb_block_given_p()) {
2234 rb_raise(rb_eArgError, "block is needed.");
2235 }
2236
2237 mask_arg = rb_to_hash_type(mask_arg);
2238
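    /* A frozen identity hash can serve as the mask without a defensive
     * copy: unless it collapses to a single timing symbol below, `mask`
     * stays false-y and the !RTEST(mask) branch reuses mask_arg itself. */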
2239 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2240 mask = Qnil;
2241 }
2242
2243 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2244
2245 if (UNDEF_P(mask)) {
2246 return rb_yield(Qnil);
2247 }
2248
2249 if (!RTEST(mask)) {
2250 mask = mask_arg;
2251 }
2252 else if (RB_TYPE_P(mask, T_HASH)) {
2253 OBJ_FREEZE_RAW(mask);
2254 }
2255
2256 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2257 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2258 th->pending_interrupt_queue_checked = 0;
2259 RUBY_VM_SET_INTERRUPT(th->ec);
2260 }
2261
2262 EC_PUSH_TAG(th->ec);
2263 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2264 r = rb_yield(Qnil);
2265 }
2266 EC_POP_TAG();
2267
2268 rb_ary_pop(th->pending_interrupt_mask_stack);
2269 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2270 th->pending_interrupt_queue_checked = 0;
2271 RUBY_VM_SET_INTERRUPT(th->ec);
2272 }
2273
2274 RUBY_VM_CHECK_INTS(th->ec);
2275
2276 if (state) {
2277 EC_JUMP_TAG(th->ec, state);
2278 }
2279
2280 return r;
2281}
2282
2283/*
2284 * call-seq:
2285 * target_thread.pending_interrupt?(error = nil) -> true/false
2286 *
2287 * Returns whether or not the asynchronous queue is empty for the target thread.
2288 *
2289 * If +error+ is given, then check only for +error+ type deferred events.
2290 *
2291 * See ::pending_interrupt? for more information.
2292 */
2293static VALUE
2294rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2295{
2296 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2297
2298 if (!target_th->pending_interrupt_queue) {
2299 return Qfalse;
2300 }
2301 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2302 return Qfalse;
2303 }
2304 if (rb_check_arity(argc, 0, 1)) {
2305 VALUE err = argv[0];
2306 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2307 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2308 }
2309 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2310 }
2311 else {
2312 return Qtrue;
2313 }
2314}
2315
2316/*
2317 * call-seq:
2318 * Thread.pending_interrupt?(error = nil) -> true/false
2319 *
2320 * Returns whether or not the asynchronous queue is empty.
2321 *
2322 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2323 * this method can be used to determine if there are any deferred events.
2324 *
2325 * If this method returns true, you may finish the +:never+ block and allow deferred events to be handled.
2326 *
2327 * For example, the following method processes deferred asynchronous events
2328 * immediately.
2329 *
2330 * def Thread.kick_interrupt_immediately
2331 * Thread.handle_interrupt(Object => :immediate) {
2332 * Thread.pass
2333 * }
2334 * end
2335 *
2336 * If +error+ is given, then check only for +error+ type deferred events.
2337 *
2338 * === Usage
2339 *
2340 * th = Thread.new{
2341 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2342 * while true
2343 * ...
2344 * # reach safe point to invoke interrupt
2345 * if Thread.pending_interrupt?
2346 * Thread.handle_interrupt(Object => :immediate){}
2347 * end
2348 * ...
2349 * end
2350 * }
2351 * }
2352 * ...
2353 * th.raise # stop thread
2354 *
2355 * This example can also be written as the following, which you should prefer
2356 * in order to avoid asynchronous interrupts altogether.
2357 *
2358 * flag = true
2359 * th = Thread.new{
2360 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2361 * while true
2362 * ...
2363 * # reach safe point to invoke interrupt
2364 * break if flag == false
2365 * ...
2366 * end
2367 * }
2368 * }
2369 * ...
2370 * flag = false # stop thread
2371 */
2372
2373static VALUE
2374rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2375{
2376 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2377}
2378
2379NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2380
2381static void
2382rb_threadptr_to_kill(rb_thread_t *th)
2383{
2384 rb_threadptr_pending_interrupt_clear(th);
2385 th->status = THREAD_RUNNABLE;
2386 th->to_kill = 1;
2387 th->ec->errinfo = INT2FIX(TAG_FATAL);
2388 EC_JUMP_TAG(th->ec, TAG_FATAL);
2389}
2390
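/* Atomically consume the unmasked interrupt bits: the CAS loop replaces
 * ec->interrupt_flag with (flag & interrupt_mask), so masked bits stay
 * pending while the caller receives exactly the bits that were cleared. */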
2391static inline rb_atomic_t
2392threadptr_get_interrupts(rb_thread_t *th)
2393{
2394 rb_execution_context_t *ec = th->ec;
2395 rb_atomic_t interrupt;
2396 rb_atomic_t old;
2397
2398 do {
2399 interrupt = ec->interrupt_flag;
2400 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2401 } while (old != interrupt);
2402 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2403}
2404
2405int
2406rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2407{
2408 rb_atomic_t interrupt;
2409 int postponed_job_interrupt = 0;
2410 int ret = FALSE;
2411
2412 if (th->ec->raised_flag) return ret;
2413
2414 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2415 int sig;
2416 int timer_interrupt;
2417 int pending_interrupt;
2418 int trap_interrupt;
2419 int terminate_interrupt;
2420
2421 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2422 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2423 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2424 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2425 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2426
2427 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2428 RB_VM_LOCK_ENTER();
2429 RB_VM_LOCK_LEAVE();
2430 }
2431
2432 if (postponed_job_interrupt) {
2433 rb_postponed_job_flush(th->vm);
2434 }
2435
2436 /* signal handling */
2437 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2438 enum rb_thread_status prev_status = th->status;
2439
2440 th->status = THREAD_RUNNABLE;
2441 {
2442 while ((sig = rb_get_next_signal()) != 0) {
2443 ret |= rb_signal_exec(th, sig);
2444 }
2445 }
2446 th->status = prev_status;
2447 }
2448
2449 /* exception from another thread */
2450 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2451 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2452 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2453 ret = TRUE;
2454
2455 if (UNDEF_P(err)) {
2456 /* no error */
2457 }
2458 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2459 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2460 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2461 terminate_interrupt = 1;
2462 }
2463 else {
2464 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2465 /* the only special exception to be queued across threads */
2466 err = ruby_vm_special_exception_copy(err);
2467 }
2468 /* set runnable if th was sleeping. */
2469 if (th->status == THREAD_STOPPED ||
2470 th->status == THREAD_STOPPED_FOREVER)
2471 th->status = THREAD_RUNNABLE;
2472 rb_exc_raise(err);
2473 }
2474 }
2475
2476 if (terminate_interrupt) {
2477 rb_threadptr_to_kill(th);
2478 }
2479
2480 if (timer_interrupt) {
2481 uint32_t limits_us = TIME_QUANTUM_USEC;
2482
2483 if (th->priority > 0)
2484 limits_us <<= th->priority;
2485 else
2486 limits_us >>= -th->priority;
2487
2488 if (th->status == THREAD_RUNNABLE)
2489 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2490
2491 VM_ASSERT(th->ec->cfp);
2492 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2493 0, 0, 0, Qundef);
2494
2495 rb_thread_schedule_limits(limits_us);
2496 }
2497 }
2498 return ret;
2499}
2500
2501void
2502rb_thread_execute_interrupts(VALUE thval)
2503{
2504 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2505}
2506
2507static void
2508rb_threadptr_ready(rb_thread_t *th)
2509{
2510 rb_threadptr_interrupt(th);
2511}
2512
2513static VALUE
2514rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2515{
2516 VALUE exc;
2517
2518 if (rb_threadptr_dead(target_th)) {
2519 return Qnil;
2520 }
2521
2522 if (argc == 0) {
2523 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2524 }
2525 else {
2526 exc = rb_make_exception(argc, argv);
2527 }
2528
2529 /* making an exception object can switch threads,
2530 so we need to check thread deadness again */
2531 if (rb_threadptr_dead(target_th)) {
2532 return Qnil;
2533 }
2534
2535 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2536 rb_threadptr_pending_interrupt_enque(target_th, exc);
2537 rb_threadptr_interrupt(target_th);
2538 return Qnil;
2539}
2540
2541void
2542rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2543{
2544 VALUE argv[2];
2545
2546 argv[0] = rb_eSignal;
2547 argv[1] = INT2FIX(sig);
2548 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2549}
2550
2551void
2552rb_threadptr_signal_exit(rb_thread_t *th)
2553{
2554 VALUE argv[2];
2555
2556 argv[0] = rb_eSystemExit;
2557 argv[1] = rb_str_new2("exit");
2558
2559 // TODO: check signal raise delivery
2560 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2561}
2562
2563int
2564rb_ec_set_raised(rb_execution_context_t *ec)
2565{
2566 if (ec->raised_flag & RAISED_EXCEPTION) {
2567 return 1;
2568 }
2569 ec->raised_flag |= RAISED_EXCEPTION;
2570 return 0;
2571}
2572
2573int
2574rb_ec_reset_raised(rb_execution_context_t *ec)
2575{
2576 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2577 return 0;
2578 }
2579 ec->raised_flag &= ~RAISED_EXCEPTION;
2580 return 1;
2581}
2582
2583int
2584rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2585{
2586 rb_vm_t *vm = GET_THREAD()->vm;
2587 struct waiting_fd *wfd = 0, *next;
2588 ccan_list_head_init(&busy->pending_fd_users);
2589 int has_any;
2590 VALUE wakeup_mutex;
2591
2592 RB_VM_LOCK_ENTER();
2593 {
2594 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2595 if (wfd->fd == fd) {
2596 rb_thread_t *th = wfd->th;
2597 VALUE err;
2598
2599 ccan_list_del(&wfd->wfd_node);
2600 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2601
2602 wfd->busy = busy;
2603 err = th->vm->special_exceptions[ruby_error_stream_closed];
2604 rb_threadptr_pending_interrupt_enque(th, err);
2605 rb_threadptr_interrupt(th);
2606 }
2607 }
2608 }
2609
2610 has_any = !ccan_list_empty(&busy->pending_fd_users);
2611 busy->closing_thread = rb_thread_current();
2612 wakeup_mutex = Qnil;
2613 if (has_any) {
2614 wakeup_mutex = rb_mutex_new();
2615 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2616 }
2617 busy->wakeup_mutex = wakeup_mutex;
2618
2619 RB_VM_LOCK_LEAVE();
2620
2621 /* If the caller didn't pass *busy as a pointer to something on the stack,
2622 we need to guard this mutex object on _our_ C stack for the duration
2623 of this function. */
2624 RB_GC_GUARD(wakeup_mutex);
2625 return has_any;
2626}
2627
2628void
2629rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2630{
2631 if (!RB_TEST(busy->wakeup_mutex)) {
2632 /* There was nobody else using this file when we closed it, so we
2633 never bothered to allocate a mutex */
2634 return;
2635 }
2636
2637 rb_mutex_lock(busy->wakeup_mutex);
2638 while (!ccan_list_empty(&busy->pending_fd_users)) {
2639 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2640 }
2641 rb_mutex_unlock(busy->wakeup_mutex);
2642}
2643
2644void
2645rb_thread_fd_close(int fd)
2646{
2647 struct rb_io_close_wait_list busy;
2648
2649 if (rb_notify_fd_close(fd, &busy)) {
2650 rb_notify_fd_close_wait(&busy);
2651 }
2652}
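/*
 * Close-notification protocol, in summary: rb_notify_fd_close() moves every
 * waiting_fd entry for `fd` onto busy->pending_fd_users and queues the
 * ruby_error_stream_closed exception on each waiting thread;
 * rb_notify_fd_close_wait() then sleeps on busy->wakeup_mutex until the
 * last waiter has removed itself from the list.
 */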
2653
2654/*
2655 * call-seq:
2656 * thr.raise
2657 * thr.raise(string)
2658 * thr.raise(exception [, string [, array]])
2659 *
2660 * Raises an exception from the given thread. The caller does not have to be
2661 * +thr+. See Kernel#raise for more information.
2662 *
2663 * Thread.abort_on_exception = true
2664 * a = Thread.new { sleep(200) }
2665 * a.raise("Gotcha")
2666 *
2667 * This will produce:
2668 *
2669 * prog.rb:3: Gotcha (RuntimeError)
2670 * from prog.rb:2:in `initialize'
2671 * from prog.rb:2:in `new'
2672 * from prog.rb:2
2673 */
2674
2675static VALUE
2676thread_raise_m(int argc, VALUE *argv, VALUE self)
2677{
2678 rb_thread_t *target_th = rb_thread_ptr(self);
2679 const rb_thread_t *current_th = GET_THREAD();
2680
2681 threadptr_check_pending_interrupt_queue(target_th);
2682 rb_threadptr_raise(target_th, argc, argv);
2683
2684 /* To perform Thread.current.raise as Kernel.raise */
2685 if (current_th == target_th) {
2686 RUBY_VM_CHECK_INTS(target_th->ec);
2687 }
2688 return Qnil;
2689}
2690
2691
2692/*
2693 * call-seq:
2694 * thr.exit -> thr
2695 * thr.kill -> thr
2696 * thr.terminate -> thr
2697 *
2698 * Terminates +thr+ and schedules another thread to be run, returning
2699 * the terminated Thread. If this is the main thread, or the last
2700 * thread, exits the process.
2701 */
2702
2703VALUE
2704rb_thread_kill(VALUE thread)
2705{
2706 rb_thread_t *target_th = rb_thread_ptr(thread);
2707
2708 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2709 return thread;
2710 }
2711 if (target_th == target_th->vm->ractor.main_thread) {
2712 rb_exit(EXIT_SUCCESS);
2713 }
2714
2715 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2716
2717 if (target_th == GET_THREAD()) {
2718 /* kill myself immediately */
2719 rb_threadptr_to_kill(target_th);
2720 }
2721 else {
2722 threadptr_check_pending_interrupt_queue(target_th);
2723 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2724 rb_threadptr_interrupt(target_th);
2725 }
2726
2727 return thread;
2728}
2729
2730int
2731rb_thread_to_be_killed(VALUE thread)
2732{
2733 rb_thread_t *target_th = rb_thread_ptr(thread);
2734
2735 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2736 return TRUE;
2737 }
2738 return FALSE;
2739}
2740
2741/*
2742 * call-seq:
2743 * Thread.kill(thread) -> thread
2744 *
2745 * Causes the given +thread+ to exit, see also Thread::exit.
2746 *
2747 * count = 0
2748 * a = Thread.new { loop { count += 1 } }
2749 * sleep(0.1) #=> 0
2750 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2751 * count #=> 93947
2752 * a.alive? #=> false
2753 */
2754
2755static VALUE
2756rb_thread_s_kill(VALUE obj, VALUE th)
2757{
2758 return rb_thread_kill(th);
2759}
2760
2761
2762/*
2763 * call-seq:
2764 * Thread.exit -> thread
2765 *
2766 * Terminates the currently running thread and schedules another thread to be
2767 * run.
2768 *
2769 * If this thread is already marked to be killed, ::exit returns the Thread.
2770 *
2771 * If this is the main thread, or the last thread, exits the process.
2772 */
2773
2774static VALUE
2775rb_thread_exit(VALUE _)
2776{
2777 rb_thread_t *th = GET_THREAD();
2778 return rb_thread_kill(th->self);
2779}
2780
2781
2782/*
2783 * call-seq:
2784 * thr.wakeup -> thr
2785 *
2786 * Marks a given thread as eligible for scheduling; however, it may still
2787 * remain blocked on I/O.
2788 *
2789 * *Note:* This does not invoke the scheduler; see #run for more information.
2790 *
2791 * c = Thread.new { Thread.stop; puts "hey!" }
2792 * sleep 0.1 while c.status!='sleep'
2793 * c.wakeup
2794 * c.join
2795 * #=> "hey!"
2796 */
2797
2798VALUE
2799rb_thread_wakeup(VALUE thread)
2800{
2801 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2802 rb_raise(rb_eThreadError, "killed thread");
2803 }
2804 return thread;
2805}
2806
2807VALUE
2808rb_thread_wakeup_alive(VALUE thread)
2809{
2810 rb_thread_t *target_th = rb_thread_ptr(thread);
2811 if (target_th->status == THREAD_KILLED) return Qnil;
2812
2813 rb_threadptr_ready(target_th);
2814
2815 if (target_th->status == THREAD_STOPPED ||
2816 target_th->status == THREAD_STOPPED_FOREVER) {
2817 target_th->status = THREAD_RUNNABLE;
2818 }
2819
2820 return thread;
2821}
2822
2823
2824/*
2825 * call-seq:
2826 * thr.run -> thr
2827 *
2828 * Wakes up +thr+, making it eligible for scheduling.
2829 *
2830 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2831 * sleep 0.1 while a.status!='sleep'
2832 * puts "Got here"
2833 * a.run
2834 * a.join
2835 *
2836 * This will produce:
2837 *
2838 * a
2839 * Got here
2840 * c
2841 *
2842 * See also the instance method #wakeup.
2843 */
2844
2845VALUE
2846rb_thread_run(VALUE thread)
2847{
2848 rb_thread_wakeup(thread);
2849 rb_thread_schedule();
2850 return thread;
2851}
2852
2853
2854VALUE
2855rb_thread_stop(void)
2856{
2857 if (rb_thread_alone()) {
2858 rb_raise(rb_eThreadError,
2859 "stopping only thread\n\tnote: use sleep to stop forever");
2860 }
2861 rb_thread_sleep_deadly();
2862 return Qnil;
2863}
2864
2865/*
2866 * call-seq:
2867 * Thread.stop -> nil
2868 *
2869 * Stops execution of the current thread, putting it into a ``sleep'' state,
2870 * and schedules execution of another thread.
2871 *
2872 * a = Thread.new { print "a"; Thread.stop; print "c" }
2873 * sleep 0.1 while a.status!='sleep'
2874 * print "b"
2875 * a.run
2876 * a.join
2877 * #=> "abc"
2878 */
2879
2880static VALUE
2881thread_stop(VALUE _)
2882{
2883 return rb_thread_stop();
2884}
2885
2886/********************************************************************/
2887
2888VALUE
2889rb_thread_list(void)
2890{
2891 // TODO
2892 return rb_ractor_thread_list();
2893}
2894
2895/*
2896 * call-seq:
2897 * Thread.list -> array
2898 *
2899 * Returns an array of Thread objects for all threads that are either runnable
2900 * or stopped.
2901 *
2902 * Thread.new { sleep(200) }
2903 * Thread.new { 1000000.times {|i| i*i } }
2904 * Thread.new { Thread.stop }
2905 * Thread.list.each {|t| p t}
2906 *
2907 * This will produce:
2908 *
2909 * #<Thread:0x401b3e84 sleep>
2910 * #<Thread:0x401b3f38 run>
2911 * #<Thread:0x401b3fb0 sleep>
2912 * #<Thread:0x401bdf4c run>
2913 */
2914
2915static VALUE
2916thread_list(VALUE _)
2917{
2918 return rb_thread_list();
2919}
2920
2921VALUE
2922rb_thread_current(void)
2923{
2924 return GET_THREAD()->self;
2925}
2926
2927/*
2928 * call-seq:
2929 * Thread.current -> thread
2930 *
2931 * Returns the currently executing thread.
2932 *
2933 * Thread.current #=> #<Thread:0x401bdf4c run>
2934 */
2935
2936static VALUE
2937thread_s_current(VALUE klass)
2938{
2939 return rb_thread_current();
2940}
2941
2942VALUE
2943rb_thread_main(void)
2944{
2945 return GET_RACTOR()->threads.main->self;
2946}
2947
2948/*
2949 * call-seq:
2950 * Thread.main -> thread
2951 *
2952 * Returns the main thread.
2953 */
2954
2955static VALUE
2956rb_thread_s_main(VALUE klass)
2957{
2958 return rb_thread_main();
2959}
2960
2961
2962/*
2963 * call-seq:
2964 * Thread.abort_on_exception -> true or false
2965 *
2966 * Returns the status of the global ``abort on exception'' condition.
2967 *
2968 * The default is +false+.
2969 *
2970 * When set to +true+, if any thread is aborted by an exception, the
2971 * raised exception will be re-raised in the main thread.
2972 *
2973 * Can also be specified by the global $DEBUG flag or command line option
2974 * +-d+.
2975 *
2976 * See also ::abort_on_exception=.
2977 *
2978 * There is also an instance level method to set this for a specific thread,
2979 * see #abort_on_exception.
2980 */
2981
2982static VALUE
2983rb_thread_s_abort_exc(VALUE _)
2984{
2985 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2986}
2987
2988
2989/*
2990 * call-seq:
2991 * Thread.abort_on_exception= boolean -> true or false
2992 *
2993 * When set to +true+, if any thread is aborted by an exception, the
2994 * raised exception will be re-raised in the main thread.
2995 * Returns the new state.
2996 *
2997 * Thread.abort_on_exception = true
2998 * t1 = Thread.new do
2999 * puts "In new thread"
3000 * raise "Exception from thread"
3001 * end
3002 * sleep(1)
3003 * puts "not reached"
3004 *
3005 * This will produce:
3006 *
3007 * In new thread
3008 * prog.rb:4: Exception from thread (RuntimeError)
3009 * from prog.rb:2:in `initialize'
3010 * from prog.rb:2:in `new'
3011 * from prog.rb:2
3012 *
3013 * See also ::abort_on_exception.
3014 *
3015 * There is also an instance level method to set this for a specific thread,
3016 * see #abort_on_exception=.
3017 */
3018
3019static VALUE
3020rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3021{
3022 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3023 return val;
3024}
3025
3026
3027/*
3028 * call-seq:
3029 * thr.abort_on_exception -> true or false
3030 *
3031 * Returns the status of the thread-local ``abort on exception'' condition for
3032 * this +thr+.
3033 *
3034 * The default is +false+.
3035 *
3036 * See also #abort_on_exception=.
3037 *
3038 * There is also a class level method to set this for all threads, see
3039 * ::abort_on_exception.
3040 */
3041
3042static VALUE
3043rb_thread_abort_exc(VALUE thread)
3044{
3045 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3046}
3047
3048
3049/*
3050 * call-seq:
3051 * thr.abort_on_exception= boolean -> true or false
3052 *
3053 * When set to +true+, if this +thr+ is aborted by an exception, the
3054 * raised exception will be re-raised in the main thread.
3055 *
3056 * See also #abort_on_exception.
3057 *
3058 * There is also a class level method to set this for all threads, see
3059 * ::abort_on_exception=.
3060 */
3061
3062static VALUE
3063rb_thread_abort_exc_set(VALUE thread, VALUE val)
3064{
3065 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3066 return val;
3067}
3068
3069
3070/*
3071 * call-seq:
3072 * Thread.report_on_exception -> true or false
3073 *
3074 * Returns the status of the global ``report on exception'' condition.
3075 *
3076 * The default is +true+ since Ruby 2.5.
3077 *
3078 * All threads created when this flag is true will report
3079 * a message on $stderr if an exception kills the thread.
3080 *
3081 * Thread.new { 1.times { raise } }
3082 *
3083 * will produce this output on $stderr:
3084 *
3085 * #<Thread:...> terminated with exception (report_on_exception is true):
3086 * Traceback (most recent call last):
3087 * 2: from -e:1:in `block in <main>'
3088 * 1: from -e:1:in `times'
3089 *
3090 * This is done to catch errors in threads early.
3091 * In some cases, you might not want this output.
3092 * There are multiple ways to avoid the extra output:
3093 *
3094 * * If the exception is not intended, the best solution is to fix the cause of
3095 * the exception so it does not happen anymore.
3096 * * If the exception is intended, it might be better to rescue it closer to
3097 * where it is raised rather than let it kill the Thread.
3098 * * If it is guaranteed the Thread will be joined with Thread#join or
3099 * Thread#value, then it is safe to disable this report with
3100 * <code>Thread.current.report_on_exception = false</code>
3101 * when starting the Thread.
3102 * However, this might handle the exception much later, or not at all
3103 * if the Thread is never joined due to the parent thread being blocked, etc.
3104 *
3105 * See also ::report_on_exception=.
3106 *
3107 * There is also an instance level method to set this for a specific thread,
3108 * see #report_on_exception=.
3109 *
3110 */
3111
3112static VALUE
3113rb_thread_s_report_exc(VALUE _)
3114{
3115 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3116}
3117
3118
3119/*
3120 * call-seq:
3121 * Thread.report_on_exception= boolean -> true or false
3122 *
3123 * Returns the new state.
3124 * When set to +true+, all threads created afterwards will inherit the
3125 * condition and report a message on $stderr if an exception kills a thread:
3126 *
3127 * Thread.report_on_exception = true
3128 * t1 = Thread.new do
3129 * puts "In new thread"
3130 * raise "Exception from thread"
3131 * end
3132 * sleep(1)
3133 * puts "In the main thread"
3134 *
3135 * This will produce:
3136 *
3137 * In new thread
3138 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3139 * Traceback (most recent call last):
3140 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3141 * In the main thread
3142 *
3143 * See also ::report_on_exception.
3144 *
3145 * There is also an instance level method to set this for a specific thread,
3146 * see #report_on_exception=.
3147 */
3148
3149static VALUE
3150rb_thread_s_report_exc_set(VALUE self, VALUE val)
3151{
3152 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3153 return val;
3154}
3155
3156
3157/*
3158 * call-seq:
3159 * Thread.ignore_deadlock -> true or false
3160 *
3161 * Returns the status of the global ``ignore deadlock'' condition.
3162 * The default is +false+, so that deadlock conditions are not ignored.
3163 *
3164 * See also ::ignore_deadlock=.
3165 *
3166 */
3167
3168static VALUE
3169rb_thread_s_ignore_deadlock(VALUE _)
3170{
3171 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3172}
3173
3174
3175/*
3176 * call-seq:
3177 * Thread.ignore_deadlock = boolean -> true or false
3178 *
3179 * Returns the new state.
3180 * When set to +true+, the VM will not check for deadlock conditions.
3181 * It is only useful to set this if your application can break a
3182 * deadlock condition via some other means, such as a signal.
3183 *
3184 * Thread.ignore_deadlock = true
3185 * queue = Thread::Queue.new
3186 *
3187 * trap(:SIGUSR1){queue.push "Received signal"}
3188 *
3189 * # raises fatal error unless ignoring deadlock
3190 * puts queue.pop
3191 *
3192 * See also ::ignore_deadlock.
3193 */
3194
3195static VALUE
3196rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3197{
3198 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3199 return val;
3200}
3201
3202
3203/*
3204 * call-seq:
3205 * thr.report_on_exception -> true or false
3206 *
3207 * Returns the status of the thread-local ``report on exception'' condition for
3208 * this +thr+.
3209 *
3210 * The default value when creating a Thread is the value of
3211 * the global flag Thread.report_on_exception.
3212 *
3213 * See also #report_on_exception=.
3214 *
3215 * There is also a class level method to set this for all new threads, see
3216 * ::report_on_exception=.
3217 */
3218
3219static VALUE
3220rb_thread_report_exc(VALUE thread)
3221{
3222 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3223}
3224
3225
3226/*
3227 * call-seq:
3228 * thr.report_on_exception= boolean -> true or false
3229 *
3230 * When set to +true+, a message is printed on $stderr if an exception
3231 * kills this +thr+. See ::report_on_exception for details.
3232 *
3233 * See also #report_on_exception.
3234 *
3235 * There is also a class level method to set this for all new threads, see
3236 * ::report_on_exception=.
3237 */
3238
3239static VALUE
3240rb_thread_report_exc_set(VALUE thread, VALUE val)
3241{
3242 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3243 return val;
3244}
3245
3246
3247/*
3248 * call-seq:
3249 * thr.group -> thgrp or nil
3250 *
3251 * Returns the ThreadGroup which contains the given thread.
3252 *
3253 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3254 */
3255
3256VALUE
3257rb_thread_group(VALUE thread)
3258{
3259 return rb_thread_ptr(thread)->thgroup;
3260}
3261
3262static const char *
3263thread_status_name(rb_thread_t *th, int detail)
3264{
3265 switch (th->status) {
3266 case THREAD_RUNNABLE:
3267 return th->to_kill ? "aborting" : "run";
3268 case THREAD_STOPPED_FOREVER:
3269 if (detail) return "sleep_forever";
3270 case THREAD_STOPPED:
3271 return "sleep";
3272 case THREAD_KILLED:
3273 return "dead";
3274 default:
3275 return "unknown";
3276 }
3277}
3278
3279static int
3280rb_threadptr_dead(rb_thread_t *th)
3281{
3282 return th->status == THREAD_KILLED;
3283}
3284
3285
3286/*
3287 * call-seq:
3288 * thr.status -> string, false or nil
3289 *
3290 * Returns the status of +thr+.
3291 *
3292 * [<tt>"sleep"</tt>]
3293 * Returned if this thread is sleeping or waiting on I/O
3294 * [<tt>"run"</tt>]
3295 * When this thread is executing
3296 * [<tt>"aborting"</tt>]
3297 * If this thread is aborting
3298 * [+false+]
3299 * When this thread is terminated normally
3300 * [+nil+]
3301 * If terminated with an exception.
3302 *
3303 * a = Thread.new { raise("die now") }
3304 * b = Thread.new { Thread.stop }
3305 * c = Thread.new { Thread.exit }
3306 * d = Thread.new { sleep }
3307 * d.kill #=> #<Thread:0x401b3678 aborting>
3308 * a.status #=> nil
3309 * b.status #=> "sleep"
3310 * c.status #=> false
3311 * d.status #=> "aborting"
3312 * Thread.current.status #=> "run"
3313 *
3314 * See also the instance methods #alive? and #stop?
3315 */
3316
3317static VALUE
3318rb_thread_status(VALUE thread)
3319{
3320 rb_thread_t *target_th = rb_thread_ptr(thread);
3321
3322 if (rb_threadptr_dead(target_th)) {
3323 if (!NIL_P(target_th->ec->errinfo) &&
3324 !FIXNUM_P(target_th->ec->errinfo)) {
3325 return Qnil;
3326 }
3327 else {
3328 return Qfalse;
3329 }
3330 }
3331 else {
3332 return rb_str_new2(thread_status_name(target_th, FALSE));
3333 }
3334}
3335
3336
3337/*
3338 * call-seq:
3339 * thr.alive? -> true or false
3340 *
3341 * Returns +true+ if +thr+ is running or sleeping.
3342 *
3343 * thr = Thread.new { }
3344 * thr.join #=> #<Thread:0x401b3fb0 dead>
3345 * Thread.current.alive? #=> true
3346 * thr.alive? #=> false
3347 *
3348 * See also #stop? and #status.
3349 */
3350
3351static VALUE
3352rb_thread_alive_p(VALUE thread)
3353{
3354 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3355}
3356
3357/*
3358 * call-seq:
3359 * thr.stop? -> true or false
3360 *
3361 * Returns +true+ if +thr+ is dead or sleeping.
3362 *
3363 * a = Thread.new { Thread.stop }
3364 * b = Thread.current
3365 * a.stop? #=> true
3366 * b.stop? #=> false
3367 *
3368 * See also #alive? and #status.
3369 */
3370
3371static VALUE
3372rb_thread_stop_p(VALUE thread)
3373{
3374 rb_thread_t *th = rb_thread_ptr(thread);
3375
3376 if (rb_threadptr_dead(th)) {
3377 return Qtrue;
3378 }
3379 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3380}
3381
3382/*
3383 * call-seq:
3384 * thr.name -> string
3385 *
3386 * Returns the name of the thread.
3387 */
3388
3389static VALUE
3390rb_thread_getname(VALUE thread)
3391{
3392 return rb_thread_ptr(thread)->name;
3393}
3394
3395/*
3396 * call-seq:
3397 * thr.name=(name) -> string
3398 *
3399 * Sets the given name to the Ruby thread.
3400 * On some platforms, it may also set the name of the native thread (pthread) and/or the name visible to the kernel.
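 *
 *    Thread.current.name = "worker-1"   # illustrative example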
3401 */
3402
3403static VALUE
3404rb_thread_setname(VALUE thread, VALUE name)
3405{
3406 rb_thread_t *target_th = rb_thread_ptr(thread);
3407
3408 if (!NIL_P(name)) {
3409 rb_encoding *enc;
3410 StringValueCStr(name);
3411 enc = rb_enc_get(name);
3412 if (!rb_enc_asciicompat(enc)) {
3413 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3414 rb_enc_name(enc));
3415 }
3416 name = rb_str_new_frozen(name);
3417 }
3418 target_th->name = name;
3419 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3420 native_set_another_thread_name(target_th->nt->thread_id, name);
3421 }
3422 return name;
3423}
3424
3425#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3426/*
3427 * call-seq:
3428 * thr.native_thread_id -> integer
3429 *
3430 * Return the native thread ID which is used by the Ruby thread.
3431 *
3432 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)).
3433 * * On Linux it is TID returned by gettid(2).
3434 * * On macOS it is the system-wide unique integral ID of thread returned
3435 * by pthread_threadid_np(3).
3436 * * On FreeBSD it is the unique integral ID of the thread returned by
3437 * pthread_getthreadid_np(3).
3438 * * On Windows it is the thread identifier returned by GetThreadId().
3439 * * On other platforms, it raises NotImplementedError.
3440 *
3441 * NOTE:
3442 * If the thread is not yet associated with, or has already been dissociated
3443 * from, a native thread, it returns _nil_.
3444 * If the Ruby implementation uses an M:N thread model, the ID may change
3445 * depending on the timing.
3446 */
3447
3448static VALUE
3449rb_thread_native_thread_id(VALUE thread)
3450{
3451 rb_thread_t *target_th = rb_thread_ptr(thread);
3452 if (rb_threadptr_dead(target_th)) return Qnil;
3453 return native_thread_native_thread_id(target_th);
3454}
3455#else
3456# define rb_thread_native_thread_id rb_f_notimplement
3457#endif
3458
3459/*
3460 * call-seq:
3461 * thr.to_s -> string
3462 *
3463 * Dump the name, id, and status of _thr_ to a string.
3464 */
3465
3466static VALUE
3467rb_thread_to_s(VALUE thread)
3468{
3469 VALUE cname = rb_class_path(rb_obj_class(thread));
3470 rb_thread_t *target_th = rb_thread_ptr(thread);
3471 const char *status;
3472 VALUE str, loc;
3473
3474 status = thread_status_name(target_th, TRUE);
3475 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3476 if (!NIL_P(target_th->name)) {
3477 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3478 }
3479 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3480 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3481 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3482 }
3483 rb_str_catf(str, " %s>", status);
3484
3485 return str;
3486}
3487
3488/* variables for recursive traversals */
3489#define recursive_key id__recursive_key__
3490
3491static VALUE
3492threadptr_local_aref(rb_thread_t *th, ID id)
3493{
3494 if (id == recursive_key) {
3495 return th->ec->local_storage_recursive_hash;
3496 }
3497 else {
3498 VALUE val;
3499 struct rb_id_table *local_storage = th->ec->local_storage;
3500
3501 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3502 return val;
3503 }
3504 else {
3505 return Qnil;
3506 }
3507 }
3508}
3509
3510VALUE
3511rb_thread_local_aref(VALUE thread, ID id)
3512{
3513 return threadptr_local_aref(rb_thread_ptr(thread), id);
3514}
3515
3516/*
3517 * call-seq:
3518 * thr[sym] -> obj or nil
3519 *
3520 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3521 * if not explicitly inside a Fiber), using either a symbol or a string name.
3522 * If the specified variable does not exist, returns +nil+.
3523 *
3524 * [
3525 * Thread.new { Thread.current["name"] = "A" },
3526 * Thread.new { Thread.current[:name] = "B" },
3527 * Thread.new { Thread.current["name"] = "C" }
3528 * ].each do |th|
3529 * th.join
3530 * puts "#{th.inspect}: #{th[:name]}"
3531 * end
3532 *
3533 * This will produce:
3534 *
3535 * #<Thread:0x00000002a54220 dead>: A
3536 * #<Thread:0x00000002a541a8 dead>: B
3537 * #<Thread:0x00000002a54130 dead>: C
3538 *
3539 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3540 * This confusion did not exist in Ruby 1.8 because
3541 * fibers are only available since Ruby 1.9.
3542 * Ruby 1.9 made these methods fiber-local in order to preserve
3543 * the following idiom for dynamic scope.
3544 *
3545 * def meth(newvalue)
3546 * begin
3547 * oldvalue = Thread.current[:name]
3548 * Thread.current[:name] = newvalue
3549 * yield
3550 * ensure
3551 * Thread.current[:name] = oldvalue
3552 * end
3553 * end
3554 *
3555 * The idiom would not work as dynamic scope if the methods were thread-local
3556 * and a given block switched fibers.
3557 *
3558 * f = Fiber.new {
3559 * meth(1) {
3560 * Fiber.yield
3561 * }
3562 * }
3563 * meth(2) {
3564 * f.resume
3565 * }
3566 * f.resume
3567 * p Thread.current[:name]
3568 * #=> nil if fiber-local
3569 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3570 *
3571 * For thread-local variables, please see #thread_variable_get and
3572 * #thread_variable_set.
3573 *
3574 */
3575
3576static VALUE
3577rb_thread_aref(VALUE thread, VALUE key)
3578{
3579 ID id = rb_check_id(&key);
3580 if (!id) return Qnil;
3581 return rb_thread_local_aref(thread, id);
3582}
3583
3584/*
3585 * call-seq:
3586 * thr.fetch(sym) -> obj
3587 * thr.fetch(sym) { } -> obj
3588 * thr.fetch(sym, default) -> obj
3589 *
3590 * Returns a fiber-local for the given key. If the key can't be
3591 * found, there are several options: With no other arguments, it will
3592 * raise a KeyError exception; if <i>default</i> is given, then that
3593 * will be returned; if the optional code block is specified, then
3594 * that will be run and its result returned. See Thread#[] and
3595 * Hash#fetch.
3596 */
3597static VALUE
3598rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3599{
3600 VALUE key, val;
3601 ID id;
3602 rb_thread_t *target_th = rb_thread_ptr(self);
3603 int block_given;
3604
3605 rb_check_arity(argc, 1, 2);
3606 key = argv[0];
3607
3608 block_given = rb_block_given_p();
3609 if (block_given && argc == 2) {
3610 rb_warn("block supersedes default value argument");
3611 }
3612
3613 id = rb_check_id(&key);
3614
3615 if (id == recursive_key) {
3616 return target_th->ec->local_storage_recursive_hash;
3617 }
3618 else if (id && target_th->ec->local_storage &&
3619 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3620 return val;
3621 }
3622 else if (block_given) {
3623 return rb_yield(key);
3624 }
3625 else if (argc == 1) {
3626 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3627 }
3628 else {
3629 return argv[1];
3630 }
3631}
3632
3633static VALUE
3634threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3635{
3636 if (id == recursive_key) {
3637 th->ec->local_storage_recursive_hash = val;
3638 return val;
3639 }
3640 else {
3641 struct rb_id_table *local_storage = th->ec->local_storage;
3642
3643 if (NIL_P(val)) {
3644 if (!local_storage) return Qnil;
3645 rb_id_table_delete(local_storage, id);
3646 return Qnil;
3647 }
3648 else {
3649 if (local_storage == NULL) {
3650 th->ec->local_storage = local_storage = rb_id_table_create(0);
3651 }
3652 rb_id_table_insert(local_storage, id, val);
3653 return val;
3654 }
3655 }
3656}
3657
3658VALUE
3659rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3660{
3661 if (OBJ_FROZEN(thread)) {
3662 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3663 }
3664
3665 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3666}
3667
3668/*
3669 * call-seq:
3670 * thr[sym] = obj -> obj
3671 *
3672 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3673 * using either a symbol or a string.
3674 *
3675 * See also Thread#[].
3676 *
3677 * For thread-local variables, please see #thread_variable_set and
3678 * #thread_variable_get.
3679 */
3680
3681static VALUE
3682rb_thread_aset(VALUE self, VALUE id, VALUE val)
3683{
3684 return rb_thread_local_aset(self, rb_to_id(id), val);
3685}
3686
3687/*
3688 * call-seq:
3689 * thr.thread_variable_get(key) -> obj or nil
3690 *
3691 * Returns the value of a thread local variable that has been set. Note that
3692 * these are different from fiber local values. For fiber local values,
3693 * please see Thread#[] and Thread#[]=.
3694 *
3695 * Thread local values are carried along with threads, and do not respect
3696 * fibers. For example:
3697 *
3698 * Thread.new {
3699 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3700 * Thread.current["foo"] = "bar" # set a fiber local
3701 *
3702 * Fiber.new {
3703 * Fiber.yield [
3704 * Thread.current.thread_variable_get("foo"), # get the thread local
3705 * Thread.current["foo"], # get the fiber local
3706 * ]
3707 * }.resume
3708 * }.join.value # => ['bar', nil]
3709 *
3710 * The value "bar" is returned for the thread local, whereas nil is returned
3711 * for the fiber local. The fiber is executed in the same thread, so the
3712 * thread local values are available.
3713 */
3714
3715static VALUE
3716rb_thread_variable_get(VALUE thread, VALUE key)
3717{
3718 VALUE locals;
3719
3720 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3721 return Qnil;
3722 }
3723 locals = rb_thread_local_storage(thread);
3724 return rb_hash_aref(locals, rb_to_symbol(key));
3725}
3726
3727/*
3728 * call-seq:
3729 * thr.thread_variable_set(key, value)
3730 *
3731 * Sets a thread local with +key+ to +value+. Note that these are local to
3732 * threads, and not to fibers. Please see Thread#thread_variable_get and
3733 * Thread#[] for more information.
3734 */
3735
3736static VALUE
3737rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3738{
3739 VALUE locals;
3740
3741 if (OBJ_FROZEN(thread)) {
3742 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3743 }
3744
3745 locals = rb_thread_local_storage(thread);
3746 return rb_hash_aset(locals, rb_to_symbol(key), val);
3747}
3748
3749/*
3750 * call-seq:
3751 * thr.key?(sym) -> true or false
3752 *
3753 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3754 * variable.
3755 *
3756 * me = Thread.current
3757 * me[:oliver] = "a"
3758 * me.key?(:oliver) #=> true
3759 * me.key?(:stanley) #=> false
3760 */
3761
3762static VALUE
3763rb_thread_key_p(VALUE self, VALUE key)
3764{
3765 VALUE val;
3766 ID id = rb_check_id(&key);
3767 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3768
3769 if (!id || local_storage == NULL) {
3770 return Qfalse;
3771 }
3772 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3773}
3774
3775static enum rb_id_table_iterator_result
3776thread_keys_i(ID key, VALUE value, void *ary)
3777{
3778 rb_ary_push((VALUE)ary, ID2SYM(key));
3779 return ID_TABLE_CONTINUE;
3780}
3781
3782int
3783rb_thread_alone(void)
3784{
3785 // TODO
3786 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3787}
3788
3789/*
3790 * call-seq:
3791 * thr.keys -> array
3792 *
3793 * Returns an array of the names of the fiber-local variables (as Symbols).
3794 *
3795 * thr = Thread.new do
3796 * Thread.current[:cat] = 'meow'
3797 * Thread.current["dog"] = 'woof'
3798 * end
3799 * thr.join #=> #<Thread:0x401b3f10 dead>
3800 * thr.keys #=> [:dog, :cat]
3801 */
3802
3803static VALUE
3804rb_thread_keys(VALUE self)
3805{
3806 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3807 VALUE ary = rb_ary_new();
3808
3809 if (local_storage) {
3810 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3811 }
3812 return ary;
3813}
3814
3815static int
3816keys_i(VALUE key, VALUE value, VALUE ary)
3817{
3818 rb_ary_push(ary, key);
3819 return ST_CONTINUE;
3820}
3821
3822/*
3823 * call-seq:
3824 * thr.thread_variables -> array
3825 *
3826 * Returns an array of the names of the thread-local variables (as Symbols).
3827 *
3828 * thr = Thread.new do
3829 * Thread.current.thread_variable_set(:cat, 'meow')
3830 * Thread.current.thread_variable_set("dog", 'woof')
3831 * end
3832 * thr.join #=> #<Thread:0x401b3f10 dead>
3833 * thr.thread_variables #=> [:dog, :cat]
3834 *
3835 * Note that these are not fiber local variables. Please see Thread#[] and
3836 * Thread#thread_variable_get for more details.
3837 */
3838
3839static VALUE
3840rb_thread_variables(VALUE thread)
3841{
3842 VALUE locals;
3843 VALUE ary;
3844
3845 ary = rb_ary_new();
3846 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3847 return ary;
3848 }
3849 locals = rb_thread_local_storage(thread);
3850 rb_hash_foreach(locals, keys_i, ary);
3851
3852 return ary;
3853}
3854
3855/*
3856 * call-seq:
3857 * thr.thread_variable?(key) -> true or false
3858 *
3859 * Returns +true+ if the given string (or symbol) exists as a thread-local
3860 * variable.
3861 *
3862 * me = Thread.current
3863 * me.thread_variable_set(:oliver, "a")
3864 * me.thread_variable?(:oliver) #=> true
3865 * me.thread_variable?(:stanley) #=> false
3866 *
3867 * Note that these are not fiber local variables. Please see Thread#[] and
3868 * Thread#thread_variable_get for more details.
3869 */
3870
3871static VALUE
3872rb_thread_variable_p(VALUE thread, VALUE key)
3873{
3874 VALUE locals;
3875
3876 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3877 return Qfalse;
3878 }
3879 locals = rb_thread_local_storage(thread);
3880
3881 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3882}
3883
3884/*
3885 * call-seq:
3886 * thr.priority -> integer
3887 *
3888 * Returns the priority of <i>thr</i>. The default is inherited from the
3889 * thread that created the new thread, or zero for the
3890 * initial main thread; higher-priority threads will run more frequently
3891 * than lower-priority threads (but lower-priority threads can also run).
3892 *
3893 * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3894 * platforms.
3895 *
3896 * Thread.current.priority #=> 0
3897 */
3898
3899static VALUE
3900rb_thread_priority(VALUE thread)
3901{
3902 return INT2NUM(rb_thread_ptr(thread)->priority);
3903}
3904
3905
3906/*
3907 * call-seq:
3908 * thr.priority= integer -> thr
3909 *
3910 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3911 * will run more frequently than lower-priority threads (but lower-priority
3912 * threads can also run).
3913 *
3914 * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3915 * platforms.
3916 *
3917 * count1 = count2 = 0
3918 * a = Thread.new do
3919 * loop { count1 += 1 }
3920 * end
3921 * a.priority = -1
3922 *
3923 * b = Thread.new do
3924 * loop { count2 += 1 }
3925 * end
3926 * b.priority = -2
3927 * sleep 1 #=> 1
3928 * count1 #=> 622504
3929 * count2 #=> 5832
3930 */
3931
3932static VALUE
3933rb_thread_priority_set(VALUE thread, VALUE prio)
3934{
3935 rb_thread_t *target_th = rb_thread_ptr(thread);
3936 int priority;
3937
3938#if USE_NATIVE_THREAD_PRIORITY
3939 target_th->priority = NUM2INT(prio);
3940 native_thread_apply_priority(target_th);
3941#else
3942 priority = NUM2INT(prio);
3943 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3944 priority = RUBY_THREAD_PRIORITY_MAX;
3945 }
3946 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3947 priority = RUBY_THREAD_PRIORITY_MIN;
3948 }
3949 target_th->priority = (int8_t)priority;
3950#endif
3951 return INT2NUM(target_th->priority);
3952}
3953
3954/* for IO */
3955
3956#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3957
3958/*
3959 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3960 * in the select(2) system call.
3961 *
3962 * - Linux 2.2.12 (?)
3963 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3964 * select(2) documents how to allocate fd_set dynamically.
3965 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3966 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3967 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3968 * select(2) documents how to allocate fd_set dynamically.
3969 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3970 * - Solaris 8 has select_large_fdset
3971 * - Mac OS X 10.7 (Lion)
3972 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3973 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3974 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3975 *
3976 * When fd_set is not big enough to hold big file descriptors,
3977 * it should be allocated dynamically.
3978 * Note that this assumes fd_set is structured as a bitmap.
3979 *
3980 * rb_fd_init allocates the memory.
3981 * rb_fd_term frees the memory.
3982 * rb_fd_set may re-allocate the bitmap.
3983 *
3984 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3985 */
3986
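/*
 * Usage sketch (illustrative; `fd` is a hypothetical descriptor that may
 * exceed FD_SETSIZE):
 *
 *   rb_fdset_t set;
 *   rb_fd_init(&set);
 *   rb_fd_set(fd, &set);                  // grows the bitmap as needed
 *   if (rb_fd_select(fd + 1, &set, NULL, NULL, NULL) > 0 &&
 *       rb_fd_isset(fd, &set)) {
 *       // fd is readable, even if fd >= FD_SETSIZE
 *   }
 *   rb_fd_term(&set);
 */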
3987void
3988rb_fd_init(rb_fdset_t *fds)
3989{
3990 fds->maxfd = 0;
3991 fds->fdset = ALLOC(fd_set);
3992 FD_ZERO(fds->fdset);
3993}
3994
3995void
3996rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3997{
3998 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3999
4000 if (size < sizeof(fd_set))
4001 size = sizeof(fd_set);
4002 dst->maxfd = src->maxfd;
4003 dst->fdset = xmalloc(size);
4004 memcpy(dst->fdset, src->fdset, size);
4005}
4006
4007void
4008rb_fd_term(rb_fdset_t *fds)
4009{
4010 xfree(fds->fdset);
4011 fds->maxfd = 0;
4012 fds->fdset = 0;
4013}
4014
4015void
4016rb_fd_zero(rb_fdset_t *fds)
4017{
4018 if (fds->fdset)
4019 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4020}
4021
4022static void
4023rb_fd_resize(int n, rb_fdset_t *fds)
4024{
4025 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4026 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4027
4028 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4029 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4030
4031 if (m > o) {
4032 fds->fdset = xrealloc(fds->fdset, m);
4033 memset((char *)fds->fdset + o, 0, m - o);
4034 }
4035 if (n >= fds->maxfd) fds->maxfd = n + 1;
4036}
4037
4038void
4039rb_fd_set(int n, rb_fdset_t *fds)
4040{
4041 rb_fd_resize(n, fds);
4042 FD_SET(n, fds->fdset);
4043}
4044
4045void
4046rb_fd_clr(int n, rb_fdset_t *fds)
4047{
4048 if (n >= fds->maxfd) return;
4049 FD_CLR(n, fds->fdset);
4050}
4051
4052int
4053rb_fd_isset(int n, const rb_fdset_t *fds)
4054{
4055 if (n >= fds->maxfd) return 0;
4056 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4057}
4058
4059void
4060rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4061{
4062 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4063
4064 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4065 dst->maxfd = max;
4066 dst->fdset = xrealloc(dst->fdset, size);
4067 memcpy(dst->fdset, src, size);
4068}
4069
4070void
4071rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4072{
4073 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4074
4075 if (size < sizeof(fd_set))
4076 size = sizeof(fd_set);
4077 dst->maxfd = src->maxfd;
4078 dst->fdset = xrealloc(dst->fdset, size);
4079 memcpy(dst->fdset, src->fdset, size);
4080}
4081
4082int
4083rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4084{
4085 fd_set *r = NULL, *w = NULL, *e = NULL;
4086 if (readfds) {
4087 rb_fd_resize(n - 1, readfds);
4088 r = rb_fd_ptr(readfds);
4089 }
4090 if (writefds) {
4091 rb_fd_resize(n - 1, writefds);
4092 w = rb_fd_ptr(writefds);
4093 }
4094 if (exceptfds) {
4095 rb_fd_resize(n - 1, exceptfds);
4096 e = rb_fd_ptr(exceptfds);
4097 }
4098 return select(n, r, w, e, timeout);
4099}
4100
4101#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4102
4103#undef FD_ZERO
4104#undef FD_SET
4105#undef FD_CLR
4106#undef FD_ISSET
4107
4108#define FD_ZERO(f) rb_fd_zero(f)
4109#define FD_SET(i, f) rb_fd_set((i), (f))
4110#define FD_CLR(i, f) rb_fd_clr((i), (f))
4111#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4112
4113#elif defined(_WIN32)
4114
4115void
4116rb_fd_init(rb_fdset_t *set)
4117{
4118 set->capa = FD_SETSIZE;
4119 set->fdset = ALLOC(fd_set);
4120 FD_ZERO(set->fdset);
4121}
4122
4123void
4124rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4125{
4126 rb_fd_init(dst);
4127 rb_fd_dup(dst, src);
4128}
4129
4130void
4131rb_fd_term(rb_fdset_t *set)
4132{
4133 xfree(set->fdset);
4134 set->fdset = NULL;
4135 set->capa = 0;
4136}
4137
4138void
4139rb_fd_set(int fd, rb_fdset_t *set)
4140{
4141 unsigned int i;
4142 SOCKET s = rb_w32_get_osfhandle(fd);
4143
4144 for (i = 0; i < set->fdset->fd_count; i++) {
4145 if (set->fdset->fd_array[i] == s) {
4146 return;
4147 }
4148 }
4149 if (set->fdset->fd_count >= (unsigned)set->capa) {
4150 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4151 set->fdset =
4152 rb_xrealloc_mul_add(
4153 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4154 }
4155 set->fdset->fd_array[set->fdset->fd_count++] = s;
4156}
4157
4158#undef FD_ZERO
4159#undef FD_SET
4160#undef FD_CLR
4161#undef FD_ISSET
4162
4163#define FD_ZERO(f) rb_fd_zero(f)
4164#define FD_SET(i, f) rb_fd_set((i), (f))
4165#define FD_CLR(i, f) rb_fd_clr((i), (f))
4166#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4167
4168#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4169
4170#endif
4171
4172#ifndef rb_fd_no_init
4173#define rb_fd_no_init(fds) (void)(fds)
4174#endif
4175
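/*
 * wait_retryable() decides whether an interrupted select/ppoll should be
 * retried; a summary of the logic below, for reference:
 *   - *result < 0 with EINTR (or ERESTART): retryable; *result is reset to 0
 *     and the remaining relative timeout *rel is recomputed against `end`
 *     (clamped to 0 once the deadline has passed).
 *   - *result == 0 while time remains (or with no deadline at all): a
 *     spurious wakeup, also retryable.
 *   - anything else is final.
 */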
4176static int
4177wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4178{
4179 if (*result < 0) {
4180 switch (errnum) {
4181 case EINTR:
4182#ifdef ERESTART
4183 case ERESTART:
4184#endif
4185 *result = 0;
4186 if (rel && hrtime_update_expire(rel, end)) {
4187 *rel = 0;
4188 }
4189 return TRUE;
4190 }
4191 return FALSE;
4192 }
4193 else if (*result == 0) {
4194 /* check for spurious wakeup */
4195 if (rel) {
4196 return !hrtime_update_expire(rel, end);
4197 }
4198 return TRUE;
4199 }
4200 return FALSE;
4201}
4203struct select_set {
4204 int max;
4205 rb_thread_t *th;
4206 rb_fdset_t *rset;
4207 rb_fdset_t *wset;
4208 rb_fdset_t *eset;
4209 rb_fdset_t orig_rset;
4210 rb_fdset_t orig_wset;
4211 rb_fdset_t orig_eset;
4212 struct timeval *timeout;
4213};
4214
4215static VALUE
4216select_set_free(VALUE p)
4217{
4218 struct select_set *set = (struct select_set *)p;
4219
4220 rb_fd_term(&set->orig_rset);
4221 rb_fd_term(&set->orig_wset);
4222 rb_fd_term(&set->orig_eset);
4223
4224 return Qfalse;
4225}
4226
4227static VALUE
4228do_select(VALUE p)
4229{
4230 struct select_set *set = (struct select_set *)p;
4231 volatile int result = 0;
4232 int lerrno;
4233 rb_hrtime_t *to, rel, end = 0;
4234
4235 timeout_prepare(&to, &rel, &end, set->timeout);
4236 volatile rb_hrtime_t endtime = end;
4237#define restore_fdset(dst, src) \
4238 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4239#define do_select_update() \
4240 (restore_fdset(set->rset, &set->orig_rset), \
4241 restore_fdset(set->wset, &set->orig_wset), \
4242 restore_fdset(set->eset, &set->orig_eset), \
4243 TRUE)
4244
4245 do {
4246 lerrno = 0;
4247
4248 BLOCKING_REGION(set->th, {
4249 struct timeval tv;
4250
4251 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4252 result = native_fd_select(set->max,
4253 set->rset, set->wset, set->eset,
4254 rb_hrtime2timeval(&tv, to), set->th);
4255 if (result < 0) lerrno = errno;
4256 }
4257 }, ubf_select, set->th, TRUE);
4258
4259 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4260 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4261
4262 if (result < 0) {
4263 errno = lerrno;
4264 }
4265
4266 return (VALUE)result;
4267}
4268
4269int
4270rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4271 struct timeval *timeout)
4272{
4273 struct select_set set;
4274
4275 set.th = GET_THREAD();
4276 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4277 set.max = max;
4278 set.rset = read;
4279 set.wset = write;
4280 set.eset = except;
4281 set.timeout = timeout;
4282
4283 if (!set.rset && !set.wset && !set.eset) {
4284        if (!timeout) {
4285            rb_thread_sleep_forever();
4286            return 0;
4287 }
4288 rb_thread_wait_for(*timeout);
4289 return 0;
4290 }
4291
4292#define fd_init_copy(f) do { \
4293 if (set.f) { \
4294 rb_fd_resize(set.max - 1, set.f); \
4295 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4296 rb_fd_init_copy(&set.orig_##f, set.f); \
4297 } \
4298 } \
4299 else { \
4300 rb_fd_no_init(&set.orig_##f); \
4301 } \
4302 } while (0)
4303 fd_init_copy(rset);
4304 fd_init_copy(wset);
4305 fd_init_copy(eset);
4306#undef fd_init_copy
4307
4308 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4309}
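
/*
 * Caller sketch (illustrative; `fd` is a hypothetical descriptor).  Unlike a
 * raw select(2), rb_thread_fd_select() releases the GVL while blocking and
 * re-checks pending interrupts, so other Ruby threads keep running:
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = { 5, 0 };
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);
 *   int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
 *   if (n > 0 && rb_fd_isset(fd, &rfds)) { ...fd is readable... }
 *   rb_fd_term(&rfds);
 */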
4310
4311#ifdef USE_POLL
4312
4313/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4314#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4315#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4316#define POLLEX_SET (POLLPRI)
4317
4318#ifndef POLLERR_SET /* defined for FreeBSD for now */
4319# define POLLERR_SET (0)
4320#endif
4321
4322/*
4323 * returns a mask of events
4324 */
4325int
4326rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4327{
4328 struct pollfd fds[1];
4329 volatile int result = 0;
4330 nfds_t nfds;
4331 struct waiting_fd wfd;
4332 int state;
4333 volatile int lerrno;
4334
4335 rb_execution_context_t *ec = GET_EC();
4336 rb_thread_t *th = rb_ec_thread_ptr(ec);
4337
4338 if (thread_io_wait_events(th, ec, fd, events, timeout, &wfd)) {
4339 return 0; // timeout
4340 }
4341
4342 thread_io_setup_wfd(th, fd, &wfd);
4343
4344 EC_PUSH_TAG(wfd.th->ec);
4345 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4346 rb_hrtime_t *to, rel, end = 0;
4347 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4348 timeout_prepare(&to, &rel, &end, timeout);
4349 volatile rb_hrtime_t endtime = end;
4350 fds[0].fd = fd;
4351 fds[0].events = (short)events;
4352 fds[0].revents = 0;
4353 do {
4354 nfds = 1;
4355
4356 lerrno = 0;
4357 BLOCKING_REGION(wfd.th, {
4358 struct timespec ts;
4359
4360 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4361 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4362 if (result < 0) lerrno = errno;
4363 }
4364 }, ubf_select, wfd.th, TRUE);
4365
4366 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4367 } while (wait_retryable(&result, lerrno, to, endtime));
4368 }
4369 EC_POP_TAG();
4370
4371 thread_io_wake_pending_closer(&wfd);
4372
4373 if (state) {
4374 EC_JUMP_TAG(wfd.th->ec, state);
4375 }
4376
4377 if (result < 0) {
4378 errno = lerrno;
4379 return -1;
4380 }
4381
4382 if (fds[0].revents & POLLNVAL) {
4383 errno = EBADF;
4384 return -1;
4385 }
4386
4387 /*
4388 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4389 * Therefore we need to fix them up.
4390 */
4391 result = 0;
4392 if (fds[0].revents & POLLIN_SET)
4393 result |= RB_WAITFD_IN;
4394 if (fds[0].revents & POLLOUT_SET)
4395 result |= RB_WAITFD_OUT;
4396 if (fds[0].revents & POLLEX_SET)
4397 result |= RB_WAITFD_PRI;
4398
4399 /* all requested events are ready if there is an error */
4400 if (fds[0].revents & POLLERR_SET)
4401 result |= events;
4402
4403 return result;
4404}
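
/*
 * Caller sketch (illustrative; `fd` is hypothetical): the return value is a
 * mask of RB_WAITFD_* bits, 0 on timeout, or -1 with errno set.
 *
 *   int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, NULL);
 *   if (revents < 0) rb_sys_fail("wait_for_single_fd");
 *   if (revents & RB_WAITFD_IN) { ...fd is readable... }
 */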
4405#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4406struct select_args {
4407 union {
4408 int fd;
4409 int error;
4410 } as;
4411 rb_fdset_t *read;
4412 rb_fdset_t *write;
4413 rb_fdset_t *except;
4414 struct waiting_fd wfd;
4415 struct timeval *tv;
4416};
4417
4418static VALUE
4419select_single(VALUE ptr)
4420{
4421 struct select_args *args = (struct select_args *)ptr;
4422 int r;
4423
4424 r = rb_thread_fd_select(args->as.fd + 1,
4425 args->read, args->write, args->except, args->tv);
4426 if (r == -1)
4427 args->as.error = errno;
4428 if (r > 0) {
4429 r = 0;
4430 if (args->read && rb_fd_isset(args->as.fd, args->read))
4431 r |= RB_WAITFD_IN;
4432 if (args->write && rb_fd_isset(args->as.fd, args->write))
4433 r |= RB_WAITFD_OUT;
4434 if (args->except && rb_fd_isset(args->as.fd, args->except))
4435 r |= RB_WAITFD_PRI;
4436 }
4437 return (VALUE)r;
4438}
4439
4440static VALUE
4441select_single_cleanup(VALUE ptr)
4442{
4443 struct select_args *args = (struct select_args *)ptr;
4444
4445 thread_io_wake_pending_closer(&args->wfd);
4446 if (args->read) rb_fd_term(args->read);
4447 if (args->write) rb_fd_term(args->write);
4448 if (args->except) rb_fd_term(args->except);
4449
4450 return (VALUE)-1;
4451}
4452
4453static rb_fdset_t *
4454init_set_fd(int fd, rb_fdset_t *fds)
4455{
4456 if (fd < 0) {
4457 return 0;
4458 }
4459 rb_fd_init(fds);
4460 rb_fd_set(fd, fds);
4461
4462 return fds;
4463}
4464
4465int
4466rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4467{
4468 rb_fdset_t rfds, wfds, efds;
4469 struct select_args args;
4470 int r;
4471 VALUE ptr = (VALUE)&args;
4472 rb_execution_context_t *ec = GET_EC();
4473 rb_thread_t *th = rb_ec_thread_ptr(ec);
4474
4475 if (thread_io_wait_events(th, ec, fd, events, timeout, &args.wfd)) {
4476 return 0; // timeout
4477 }
4478
4479 args.as.fd = fd;
4480 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4481 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4482 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4483 args.tv = timeout;
4484 args.wfd.fd = fd;
4485 args.wfd.th = th;
4486 args.wfd.busy = NULL;
4487
4488 RB_VM_LOCK_ENTER();
4489 {
4490 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4491 }
4492 RB_VM_LOCK_LEAVE();
4493
4494 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4495 if (r == -1)
4496 errno = args.as.error;
4497
4498 return r;
4499}
4500#endif /* ! USE_POLL */
4501
4502/*
4503 * for GC
4504 */
4505
4506#ifdef USE_CONSERVATIVE_STACK_END
4507void
4508rb_gc_set_stack_end(VALUE **stack_end_p)
4509{
4510 VALUE stack_end;
4511 *stack_end_p = &stack_end;
4512}
4513#endif
4514
4515/*
4516 *
4517 */
4518
4519void
4520rb_threadptr_check_signal(rb_thread_t *mth)
4521{
4522 /* mth must be main_thread */
4523 if (rb_signal_buff_size() > 0) {
4524 /* wakeup main thread */
4525 threadptr_trap_interrupt(mth);
4526 }
4527}
4528
4529static void
4530async_bug_fd(const char *mesg, int errno_arg, int fd)
4531{
4532 char buff[64];
4533 size_t n = strlcpy(buff, mesg, sizeof(buff));
4534 if (n < sizeof(buff)-3) {
4535 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4536 }
4537 rb_async_bug_errno(buff, errno_arg);
4538}
4539
4540/* VM-dependent API is not available for this function */
4541static int
4542consume_communication_pipe(int fd)
4543{
4544#if USE_EVENTFD
4545 uint64_t buff[1];
4546#else
4547    /* the buffer can be shared because no one else refers to it. */
4548 static char buff[1024];
4549#endif
4550 ssize_t result;
4551 int ret = FALSE; /* for rb_sigwait_sleep */
4552
4553 while (1) {
4554 result = read(fd, buff, sizeof(buff));
4555#if USE_EVENTFD
4556        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4557#else
4558 RUBY_DEBUG_LOG("result:%d", (int)result);
4559#endif
4560 if (result > 0) {
4561 ret = TRUE;
4562 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4563 return ret;
4564 }
4565 }
4566 else if (result == 0) {
4567 return ret;
4568 }
4569 else if (result < 0) {
4570 int e = errno;
4571 switch (e) {
4572 case EINTR:
4573 continue; /* retry */
4574 case EAGAIN:
4575#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4576 case EWOULDBLOCK:
4577#endif
4578 return ret;
4579 default:
4580 async_bug_fd("consume_communication_pipe: read", e, fd);
4581 }
4582 }
4583 }
4584}
4585
4586void
4587rb_thread_stop_timer_thread(void)
4588{
4589 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4590 native_reset_timer_thread();
4591 }
4592}
4593
4594void
4595rb_thread_reset_timer_thread(void)
4596{
4597 native_reset_timer_thread();
4598}
4599
4600void
4601rb_thread_start_timer_thread(void)
4602{
4603 system_working = 1;
4604 rb_thread_create_timer_thread();
4605}
4606
4607static int
4608clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4609{
4610 int i;
4611 VALUE coverage = (VALUE)val;
4612 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4613 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4614
4615 if (lines) {
4616 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4617 rb_ary_clear(lines);
4618 }
4619 else {
4620 int i;
4621 for (i = 0; i < RARRAY_LEN(lines); i++) {
4622 if (RARRAY_AREF(lines, i) != Qnil)
4623 RARRAY_ASET(lines, i, INT2FIX(0));
4624 }
4625 }
4626 }
4627 if (branches) {
4628 VALUE counters = RARRAY_AREF(branches, 1);
4629 for (i = 0; i < RARRAY_LEN(counters); i++) {
4630 RARRAY_ASET(counters, i, INT2FIX(0));
4631 }
4632 }
4633
4634 return ST_CONTINUE;
4635}
4636
4637void
4638rb_clear_coverages(void)
4639{
4640 VALUE coverages = rb_get_coverages();
4641 if (RTEST(coverages)) {
4642 rb_hash_foreach(coverages, clear_coverage_i, 0);
4643 }
4644}
4645
4646#if defined(HAVE_WORKING_FORK)
4647
4648static void
4649rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4650{
4651 rb_thread_t *i = 0;
4652 rb_vm_t *vm = th->vm;
4653 rb_ractor_t *r = th->ractor;
4654 vm->ractor.main_ractor = r;
4655 vm->ractor.main_thread = th;
4656 r->threads.main = th;
4657 r->status_ = ractor_created;
4658
4659 thread_sched_atfork(TH_SCHED(th));
4660 ubf_list_atfork();
4661
4662    // OK. Only this thread accesses these thread lists now:
4663 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4664 ccan_list_for_each(&r->threads.set, i, lt_node) {
4665 atfork(i, th);
4666 }
4667 }
4668 rb_vm_living_threads_init(vm);
4669
4670 rb_ractor_atfork(vm, th);
4671 rb_vm_postponed_job_atfork();
4672
4673 /* may be held by RJIT threads in parent */
4674 rb_native_mutex_initialize(&vm->workqueue_lock);
4675
4676 /* may be held by any thread in parent */
4677 rb_native_mutex_initialize(&th->interrupt_lock);
4678
4679 vm->fork_gen++;
4680 rb_ractor_sleeper_threads_clear(th->ractor);
4681 rb_clear_coverages();
4682
4683    // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4684 rb_thread_reset_timer_thread();
4685 rb_thread_start_timer_thread();
4686
4687 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4688 VM_ASSERT(vm->ractor.cnt == 1);
4689}
4690
4691static void
4692terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4693{
4694 if (th != current_th) {
4695 rb_mutex_abandon_keeping_mutexes(th);
4696 rb_mutex_abandon_locking_mutex(th);
4697 thread_cleanup_func(th, TRUE);
4698 }
4699}
4700
4701void rb_fiber_atfork(rb_thread_t *);
4702void
4703rb_thread_atfork(void)
4704{
4705 rb_thread_t *th = GET_THREAD();
4706 rb_threadptr_pending_interrupt_clear(th);
4707 rb_thread_atfork_internal(th, terminate_atfork_i);
4708 th->join_list = NULL;
4709 rb_fiber_atfork(th);
4710
4711    /* We don't want to reproduce CVE-2003-0900. */
4712    rb_reset_random_seed();
4713}
4714
4715static void
4716terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4717{
4718 if (th != current_th) {
4719 thread_cleanup_func_before_exec(th);
4720 }
4721}
4722
4723void
4724rb_thread_atfork_before_exec(void)
4725{
4726 rb_thread_t *th = GET_THREAD();
4727 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4728}
4729#else
4730void
4731rb_thread_atfork(void)
4732{
4733}
4734
4735void
4736rb_thread_atfork_before_exec(void)
4737{
4738}
4739#endif
4741struct thgroup {
4742 int enclosed;
4743};
4744
4745static const rb_data_type_t thgroup_data_type = {
4746 "thgroup",
4747 {
4748        0,
4749        RUBY_TYPED_DEFAULT_FREE,
4750        NULL, // No external memory to report
4751 },
4752 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4753};
4754
4755/*
4756 * Document-class: ThreadGroup
4757 *
4758 * ThreadGroup provides a means of keeping track of a number of threads as a
4759 * group.
4760 *
4761 * A given Thread object can only belong to one ThreadGroup at a time; adding
4762 * a thread to a new group will remove it from any previous group.
4763 *
4764 * Newly created threads belong to the same group as the thread from which they
4765 * were created.
4766 */
4767
4768/*
4769 * Document-const: Default
4770 *
4771 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4772 * by default.
4773 */
4774static VALUE
4775thgroup_s_alloc(VALUE klass)
4776{
4777 VALUE group;
4778 struct thgroup *data;
4779
4780 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4781 data->enclosed = 0;
4782
4783 return group;
4784}
4785
4786/*
4787 * call-seq:
4788 * thgrp.list -> array
4789 *
4790 * Returns an array of all existing Thread objects that belong to this group.
4791 *
4792 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4793 */
4794
4795static VALUE
4796thgroup_list(VALUE group)
4797{
4798 VALUE ary = rb_ary_new();
4799 rb_thread_t *th = 0;
4800 rb_ractor_t *r = GET_RACTOR();
4801
4802 ccan_list_for_each(&r->threads.set, th, lt_node) {
4803 if (th->thgroup == group) {
4804 rb_ary_push(ary, th->self);
4805 }
4806 }
4807 return ary;
4808}
4809
4810
4811/*
4812 * call-seq:
4813 * thgrp.enclose -> thgrp
4814 *
4815 * Prevents threads from being added to or removed from the receiving
4816 * ThreadGroup.
4817 *
4818 * New threads can still be started in an enclosed ThreadGroup.
4819 *
4820 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4821 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4822 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4823 * tg.add thr
4824 * #=> ThreadError: can't move from the enclosed thread group
4825 */
4826
4827static VALUE
4828thgroup_enclose(VALUE group)
4829{
4830 struct thgroup *data;
4831
4832 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4833 data->enclosed = 1;
4834
4835 return group;
4836}
4837
4838
4839/*
4840 * call-seq:
4841 * thgrp.enclosed? -> true or false
4842 *
4843 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4844 */
4845
4846static VALUE
4847thgroup_enclosed_p(VALUE group)
4848{
4849 struct thgroup *data;
4850
4851 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4852 return RBOOL(data->enclosed);
4853}
4854
4855
4856/*
4857 * call-seq:
4858 * thgrp.add(thread) -> thgrp
4859 *
4860 * Adds the given +thread+ to this group, removing it from any other
4861 * group to which it may have previously been a member.
4862 *
4863 * puts "Initial group is #{ThreadGroup::Default.list}"
4864 * tg = ThreadGroup.new
4865 * t1 = Thread.new { sleep }
4866 * t2 = Thread.new { sleep }
4867 * puts "t1 is #{t1}"
4868 * puts "t2 is #{t2}"
4869 * tg.add(t1)
4870 * puts "Initial group now #{ThreadGroup::Default.list}"
4871 * puts "tg group now #{tg.list}"
4872 *
4873 * This will produce:
4874 *
4875 * Initial group is #<Thread:0x401bdf4c>
4876 * t1 is #<Thread:0x401b3c90>
4877 * t2 is #<Thread:0x401b3c18>
4878 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4879 * tg group now #<Thread:0x401b3c90>
4880 */
4881
4882static VALUE
4883thgroup_add(VALUE group, VALUE thread)
4884{
4885 rb_thread_t *target_th = rb_thread_ptr(thread);
4886 struct thgroup *data;
4887
4888 if (OBJ_FROZEN(group)) {
4889 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4890 }
4891 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4892 if (data->enclosed) {
4893 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4894 }
4895
4896 if (OBJ_FROZEN(target_th->thgroup)) {
4897 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4898 }
4899 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4900 if (data->enclosed) {
4901 rb_raise(rb_eThreadError,
4902 "can't move from the enclosed thread group");
4903 }
4904
4905 target_th->thgroup = group;
4906 return group;
4907}
4908
4909/*
4910 * Document-class: ThreadShield
4911 */
4912static void
4913thread_shield_mark(void *ptr)
4914{
4915 rb_gc_mark((VALUE)ptr);
4916}
4917
4918static const rb_data_type_t thread_shield_data_type = {
4919 "thread_shield",
4920 {thread_shield_mark, 0, 0,},
4921 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4922};
4923
4924static VALUE
4925thread_shield_alloc(VALUE klass)
4926{
4927 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4928}
4929
4930#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4931#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4932#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4933#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4934STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
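/*
 * The waiting count lives in the user flag bits (FL_USER0..FL_USER19) of the
 * shield object itself, so no extra allocation is needed; roughly:
 *
 *   waiting = (RBASIC(shield)->flags & THREAD_SHIELD_WAITING_MASK) >> FL_USHIFT;
 */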
4935static inline unsigned int
4936rb_thread_shield_waiting(VALUE b)
4937{
4938 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4939}
4940
4941static inline void
4942rb_thread_shield_waiting_inc(VALUE b)
4943{
4944 unsigned int w = rb_thread_shield_waiting(b);
4945 w++;
4946 if (w > THREAD_SHIELD_WAITING_MAX)
4947 rb_raise(rb_eRuntimeError, "waiting count overflow");
4948 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4949 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4950}
4951
4952static inline void
4953rb_thread_shield_waiting_dec(VALUE b)
4954{
4955 unsigned int w = rb_thread_shield_waiting(b);
4956 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4957 w--;
4958 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4959 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4960}
4961
4962VALUE
4963rb_thread_shield_new(void)
4964{
4965 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4966 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4967 return thread_shield;
4968}
4969
4970bool
4971rb_thread_shield_owned(VALUE self)
4972{
4973 VALUE mutex = GetThreadShieldPtr(self);
4974 if (!mutex) return false;
4975
4976 rb_mutex_t *m = mutex_ptr(mutex);
4977
4978 return m->fiber == GET_EC()->fiber_ptr;
4979}
4980
4981/*
4982 * Wait on a thread shield.
4983 *
4984 * Returns
4985 * true: acquired the thread shield
4986 * false: the thread shield was destroyed and no other threads are waiting
4987 * nil: the thread shield was destroyed but still in use
4988 */
4989VALUE
4990rb_thread_shield_wait(VALUE self)
4991{
4992 VALUE mutex = GetThreadShieldPtr(self);
4993 rb_mutex_t *m;
4994
4995 if (!mutex) return Qfalse;
4996 m = mutex_ptr(mutex);
4997 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
4998 rb_thread_shield_waiting_inc(self);
4999 rb_mutex_lock(mutex);
5000 rb_thread_shield_waiting_dec(self);
5001 if (DATA_PTR(self)) return Qtrue;
5002 rb_mutex_unlock(mutex);
5003 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5004}
5005
5006static VALUE
5007thread_shield_get_mutex(VALUE self)
5008{
5009 VALUE mutex = GetThreadShieldPtr(self);
5010 if (!mutex)
5011 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5012 return mutex;
5013}
5014
5015/*
5016 * Release a thread shield, and return true if it has waiting threads.
5017 */
5018VALUE
5019rb_thread_shield_release(VALUE self)
5020{
5021 VALUE mutex = thread_shield_get_mutex(self);
5022 rb_mutex_unlock(mutex);
5023 return RBOOL(rb_thread_shield_waiting(self) > 0);
5024}
5025
5026/*
5027 * Release and destroy a thread shield, and return true if it has waiting threads.
5028 */
5029VALUE
5030rb_thread_shield_destroy(VALUE self)
5031{
5032 VALUE mutex = thread_shield_get_mutex(self);
5033 DATA_PTR(self) = 0;
5034 rb_mutex_unlock(mutex);
5035 return RBOOL(rb_thread_shield_waiting(self) > 0);
5036}
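
/*
 * Lifecycle sketch (illustrative): the creating fiber owns the shield's
 * mutex; other threads block in rb_thread_shield_wait() until the owner
 * releases or destroys it.
 *
 *   VALUE shield = rb_thread_shield_new();  // current fiber becomes owner
 *   ...other threads call rb_thread_shield_wait(shield) and block...
 *   rb_thread_shield_release(shield);       // Qtrue if anyone was waiting
 */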
5037
5038static VALUE
5039threadptr_recursive_hash(rb_thread_t *th)
5040{
5041 return th->ec->local_storage_recursive_hash;
5042}
5043
5044static void
5045threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5046{
5047 th->ec->local_storage_recursive_hash = hash;
5048}
5049
5050ID rb_frame_last_func(void);
5051
5052/*
5053 * Returns the current "recursive list" used to detect recursion.
5054 * This list is a hash table, unique for the current thread and for
5055 * the current __callee__.
5056 */
5057
5058static VALUE
5059recursive_list_access(VALUE sym)
5060{
5061 rb_thread_t *th = GET_THREAD();
5062 VALUE hash = threadptr_recursive_hash(th);
5063 VALUE list;
5064 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5065 hash = rb_ident_hash_new();
5066 threadptr_recursive_hash_set(th, hash);
5067 list = Qnil;
5068 }
5069 else {
5070 list = rb_hash_aref(hash, sym);
5071 }
5072 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5073 list = rb_ident_hash_new();
5074 rb_hash_aset(hash, sym, list);
5075 }
5076 return list;
5077}
5078
5079/*
5080 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5081 * in the recursion list.
5082 * Assumes the recursion list is valid.
5083 */
5084
5085static VALUE
5086recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5087{
5088#if SIZEOF_LONG == SIZEOF_VOIDP
5089 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5090#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5091 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5092 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5093#endif
5094
5095 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5096 if (UNDEF_P(pair_list))
5097 return Qfalse;
5098 if (paired_obj_id) {
5099 if (!RB_TYPE_P(pair_list, T_HASH)) {
5100 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5101 return Qfalse;
5102 }
5103 else {
5104 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5105 return Qfalse;
5106 }
5107 }
5108 return Qtrue;
5109}
5110
5111/*
5112 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5113 * For a single obj, it sets list[obj] to Qtrue.
5114 * For a pair, it sets list[obj] to paired_obj_id if possible,
5115 * otherwise list[obj] becomes a hash like:
5116 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5117 * Assumes the recursion list is valid.
5118 */
5119
5120static void
5121recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5122{
5123 VALUE pair_list;
5124
5125 if (!paired_obj) {
5126 rb_hash_aset(list, obj, Qtrue);
5127 }
5128 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5129 rb_hash_aset(list, obj, paired_obj);
5130 }
5131 else {
5132 if (!RB_TYPE_P(pair_list, T_HASH)){
5133 VALUE other_paired_obj = pair_list;
5134 pair_list = rb_hash_new();
5135 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5136 rb_hash_aset(list, obj, pair_list);
5137 }
5138 rb_hash_aset(pair_list, paired_obj, Qtrue);
5139 }
5140}
5141
5142/*
5143 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5144 * For a pair, if list[obj] is a hash, then paired_obj_id is
5145 * removed from the hash and no attempt is made to simplify
5146 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5147 * Assumes the recursion list is valid.
5148 */
5149
5150static int
5151recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5152{
5153 if (paired_obj) {
5154 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5155 if (UNDEF_P(pair_list)) {
5156 return 0;
5157 }
5158 if (RB_TYPE_P(pair_list, T_HASH)) {
5159 rb_hash_delete_entry(pair_list, paired_obj);
5160 if (!RHASH_EMPTY_P(pair_list)) {
5161                return 1; /* keep the hash until it is empty */
5162 }
5163 }
5164 }
5165 rb_hash_delete_entry(list, obj);
5166 return 1;
5167}
5169struct exec_recursive_params {
5170 VALUE (*func) (VALUE, VALUE, int);
5171 VALUE list;
5172 VALUE obj;
5173 VALUE pairid;
5174 VALUE arg;
5175};
5176
5177static VALUE
5178exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5179{
5180 struct exec_recursive_params *p = (void *)data;
5181 return (*p->func)(p->obj, p->arg, FALSE);
5182}
5183
5184/*
5185 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5186 * current method is called recursively on obj, or on the pair <obj, pairid>
5187 * If outer is 0, then the innermost func will be called with recursive set
5188 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5189 * all inner funcs are short-circuited by throw.
5190 * Implementation details: the value thrown is the recursive list, which is
5191 * specific to the current method and unlikely to be caught anywhere else.
5192 * list[recursive_key] is used as a flag for the outermost call.
5193 */
5194
5195static VALUE
5196exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5197{
5198 VALUE result = Qundef;
5199 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5200 struct exec_recursive_params p;
5201 int outermost;
5202 p.list = recursive_list_access(sym);
5203 p.obj = obj;
5204 p.pairid = pairid;
5205 p.arg = arg;
5206 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5207
5208 if (recursive_check(p.list, p.obj, pairid)) {
5209 if (outer && !outermost) {
5210 rb_throw_obj(p.list, p.list);
5211 }
5212 return (*func)(obj, arg, TRUE);
5213 }
5214 else {
5215 enum ruby_tag_type state;
5216
5217 p.func = func;
5218
5219 if (outermost) {
5220 recursive_push(p.list, ID2SYM(recursive_key), 0);
5221 recursive_push(p.list, p.obj, p.pairid);
5222 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5223 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5224 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5225 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5226 if (result == p.list) {
5227 result = (*func)(obj, arg, TRUE);
5228 }
5229 }
5230 else {
5231 volatile VALUE ret = Qundef;
5232 recursive_push(p.list, p.obj, p.pairid);
5233 EC_PUSH_TAG(GET_EC());
5234 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5235 ret = (*func)(obj, arg, FALSE);
5236 }
5237 EC_POP_TAG();
5238 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5239 goto invalid;
5240 }
5241 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5242 result = ret;
5243 }
5244 }
5245 *(volatile struct exec_recursive_params *)&p;
5246 return result;
5247
5248 invalid:
5249 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5250 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5251             sym, rb_thread_current());
5252    UNREACHABLE_RETURN(Qundef);
5253}
5254
5255/*
5256 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5257 * current method is called recursively on obj
5258 */
5259
5260VALUE
5261rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5262{
5263 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5264}
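
/*
 * Typical use, as in inspect-like functions (a sketch; the names are
 * hypothetical):
 *
 *   static VALUE
 *   my_inspect_i(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       // ...build the representation, possibly recursing into obj...
 *   }
 *
 *   rb_exec_recursive(my_inspect_i, obj, 0);  // detects re-entry on obj
 */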
5265
5266/*
5267 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5268 * current method is called recursively on the ordered pair <obj, paired_obj>
5269 */
5270
5271VALUE
5272rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5273{
5274 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5275}
5276
5277/*
5278 * If recursion is detected on the current method and obj, the outermost
5279 * func will be called with (obj, arg, Qtrue). All inner func will be
5280 * short-circuited using throw.
5281 */
5282
5283VALUE
5284rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5285{
5286 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5287}
5288
5289VALUE
5290rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5291{
5292 return exec_recursive(func, obj, 0, arg, 1, mid);
5293}
5294
5295/*
5296 * If recursion is detected on the current method, obj and paired_obj,
5297 * the outermost func will be called with (obj, arg, Qtrue). All inner
5298 * func will be short-circuited using throw.
5299 */
5300
5301VALUE
5302rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5303{
5304 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5305}
5306
5307/*
5308 * call-seq:
5309 * thread.backtrace -> array or nil
5310 *
5311 * Returns the current backtrace of the target thread.
5312 *
5313 */
5314
5315static VALUE
5316rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5317{
5318 return rb_vm_thread_backtrace(argc, argv, thval);
5319}
5320
5321/* call-seq:
5322 * thread.backtrace_locations(*args) -> array or nil
5323 *
5324 * Returns the execution stack for the target thread---an array containing
5325 * backtrace location objects.
5326 *
5327 * See Thread::Backtrace::Location for more information.
5328 *
5329 * This method behaves similarly to Kernel#caller_locations except it applies
5330 * to a specific thread.
5331 */
5332static VALUE
5333rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5334{
5335 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5336}
5337
5338void
5339Init_Thread_Mutex(void)
5340{
5341 rb_thread_t *th = GET_THREAD();
5342
5343 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5344 rb_native_mutex_initialize(&th->interrupt_lock);
5345}
5346
5347/*
5348 * Document-class: ThreadError
5349 *
5350 * Raised when an invalid operation is attempted on a thread.
5351 *
5352 * For example, when no other thread has been started:
5353 *
5354 * Thread.stop
5355 *
5356 * This will raise the following exception:
5357 *
5358 * ThreadError: stopping only thread
5359 * note: use sleep to stop forever
5360 */
5361
5362void
5363Init_Thread(void)
5364{
5365 VALUE cThGroup;
5366 rb_thread_t *th = GET_THREAD();
5367
5368 sym_never = ID2SYM(rb_intern_const("never"));
5369 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5370 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5371
5372 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5373 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5374 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5375 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5376 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5377 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5378 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5379 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5380 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5381 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5382 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5383 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5384 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5385 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5386 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5387 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5388 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5389 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5390 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5391
5392 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5393 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5394 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5395 rb_define_method(rb_cThread, "value", thread_value, 0);
5396 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5397 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5398 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5399 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5400 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5401 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5402 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5403 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5404 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5405 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5406 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5407 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5408 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5409 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5410 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5411 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5412 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5413 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5414 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5415 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5416 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5417 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5418 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5419 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5420 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5421 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5422
5423 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5424 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5425 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5426 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5427 rb_define_alias(rb_cThread, "inspect", "to_s");
5428
5429 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5430 "stream closed in another thread");
5431
5432 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5433 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5434 rb_define_method(cThGroup, "list", thgroup_list, 0);
5435 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5436 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5437 rb_define_method(cThGroup, "add", thgroup_add, 1);
5438
5439 {
5440 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5441 rb_define_const(cThGroup, "Default", th->thgroup);
5442 }
5443
5444    recursive_key = rb_intern_const("__recursive_key__");
5445
5446 /* init thread core */
5447 {
5448 /* main thread setting */
5449 {
5450 /* acquire global vm lock */
5451#ifdef HAVE_PTHREAD_NP_H
5452 VM_ASSERT(TH_SCHED(th)->running == th);
5453#endif
5454 // thread_sched_to_running() should not be called because
5455 // it assumes blocked by thread_sched_to_waiting().
5456 // thread_sched_to_running(sched, th);
5457
5458 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5459 th->pending_interrupt_queue_checked = 0;
5460 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5461 }
5462 }
5463
5464 rb_thread_create_timer_thread();
5465
5466 Init_thread_sync();
5467
5468 // TODO: Suppress unused function warning for now
5469 // if (0) rb_thread_sched_destroy(NULL);
5470}
5471
5472int
5473ruby_native_thread_p(void)
5474{
5475 rb_thread_t *th = ruby_thread_from_native();
5476
5477 return th != 0;
5478}
5479
5480#ifdef NON_SCALAR_THREAD_ID
5481 #define thread_id_str(th) (NULL)
5482#else
5483 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5484#endif
5485
5486static void
5487debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5488{
5489 rb_thread_t *th = 0;
5490 VALUE sep = rb_str_new_cstr("\n ");
5491
5492 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5493 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5494 (void *)GET_THREAD(), (void *)r->threads.main);
5495
5496 ccan_list_for_each(&r->threads.set, th, lt_node) {
5497 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5498 "native:%p int:%u",
5499 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5500
5501 if (th->locking_mutex) {
5502 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5503 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5504 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5505 }
5506
5507 {
5508 struct rb_waiting_list *list = th->join_list;
5509 while (list) {
5510                rb_str_catf(msg, "\n   depended by: rb_thread_t:%p", (void *)list->thread);
5511 list = list->next;
5512 }
5513 }
5514 rb_str_catf(msg, "\n ");
5515 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5516 rb_str_catf(msg, "\n");
5517 }
5518}
5519
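/*
 * A ractor is judged deadlocked (roughly) when every living thread is
 * sleeping forever with no pending interrupt, and no thread is blocked on a
 * mutex that could still be handed over.  The check below then raises a
 * fatal error on the main thread.
 */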
5520static void
5521rb_check_deadlock(rb_ractor_t *r)
5522{
5523 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5524
5525#ifdef RUBY_THREAD_PTHREAD_H
5526 if (r->threads.sched.readyq_cnt > 0) return;
5527#endif
5528
5529 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5530 int ltnum = rb_ractor_living_thread_num(r);
5531
5532 if (ltnum > sleeper_num) return;
5533 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5534
5535 int found = 0;
5536 rb_thread_t *th = NULL;
5537
5538 ccan_list_for_each(&r->threads.set, th, lt_node) {
5539 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5540 found = 1;
5541 }
5542 else if (th->locking_mutex) {
5543 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5544 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5545 found = 1;
5546 }
5547 }
5548 if (found)
5549 break;
5550 }
5551
5552 if (!found) {
5553 VALUE argv[2];
5554 argv[0] = rb_eFatal;
5555 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5556 debug_deadlock_check(r, argv[1]);
5557 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5558 rb_threadptr_raise(r->threads.main, 2, argv);
5559 }
5560}
5561
5562// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5563// structs. Defined here because the struct definition lives here as well.
5564size_t
5565rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5566{
5567 struct waiting_fd *waitfd = 0;
5568 size_t size = 0;
5569
5570 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5571 size += sizeof(struct waiting_fd);
5572 }
5573
5574 return size;
5575}
5576
5577static void
5578update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5579{
5580 const rb_control_frame_t *cfp = GET_EC()->cfp;
5581 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5582 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5583 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5584 if (lines) {
5585 long line = rb_sourceline() - 1;
5586 long count;
5587 VALUE num;
5588 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5589 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5590 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5591 rb_ary_push(lines, LONG2FIX(line + 1));
5592 return;
5593 }
5594 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5595 return;
5596 }
5597 num = RARRAY_AREF(lines, line);
5598 if (!FIXNUM_P(num)) return;
5599 count = FIX2LONG(num) + 1;
5600 if (POSFIXABLE(count)) {
5601 RARRAY_ASET(lines, line, LONG2FIX(count));
5602 }
5603 }
5604 }
5605}
5606
5607static void
5608update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5609{
5610 const rb_control_frame_t *cfp = GET_EC()->cfp;
5611 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5612 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5613 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5614 if (branches) {
5615 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5616 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5617 VALUE counters = RARRAY_AREF(branches, 1);
5618 VALUE num = RARRAY_AREF(counters, idx);
5619 count = FIX2LONG(num) + 1;
5620 if (POSFIXABLE(count)) {
5621 RARRAY_ASET(counters, idx, LONG2FIX(count));
5622 }
5623 }
5624 }
5625}
5626
5627const rb_method_entry_t *
5628rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5629{
5630 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5631
5632 if (!me->def) return NULL; // negative cme
5633
5634 retry:
5635 switch (me->def->type) {
5636 case VM_METHOD_TYPE_ISEQ: {
5637 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5638 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5639 path = rb_iseq_path(iseq);
5640 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5641 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5642 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5643 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5644 break;
5645 }
5646 case VM_METHOD_TYPE_BMETHOD: {
5647 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5648 if (iseq) {
5649 rb_iseq_location_t *loc;
5650 rb_iseq_check(iseq);
5651 path = rb_iseq_path(iseq);
5652 loc = &ISEQ_BODY(iseq)->location;
5653 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5654 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5655 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5656 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5657 break;
5658 }
5659 return NULL;
5660 }
5661 case VM_METHOD_TYPE_ALIAS:
5662 me = me->def->body.alias.original_me;
5663 goto retry;
5664 case VM_METHOD_TYPE_REFINED:
5665 me = me->def->body.refined.orig_me;
5666 if (!me) return NULL;
5667 goto retry;
5668 default:
5669 return NULL;
5670 }
5671
5672 /* found */
5673 if (RB_TYPE_P(path, T_ARRAY)) {
5674 path = rb_ary_entry(path, 1);
5675        if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case... */
5676 }
5677 if (resolved_location) {
5678 resolved_location[0] = path;
5679 resolved_location[1] = beg_pos_lineno;
5680 resolved_location[2] = beg_pos_column;
5681 resolved_location[3] = end_pos_lineno;
5682 resolved_location[4] = end_pos_column;
5683 }
5684 return me;
5685}
5686
5687static void
5688update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5689{
5690 const rb_control_frame_t *cfp = GET_EC()->cfp;
5691 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5692 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5693 VALUE rcount;
5694 long count;
5695
5696 me = rb_resolve_me_location(me, 0);
5697 if (!me) return;
5698
5699 rcount = rb_hash_aref(me2counter, (VALUE) me);
5700 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5701 if (POSFIXABLE(count)) {
5702 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5703 }
5704}
5705
5706VALUE
5707rb_get_coverages(void)
5708{
5709 return GET_VM()->coverages;
5710}
5711
5712int
5713rb_get_coverage_mode(void)
5714{
5715 return GET_VM()->coverage_mode;
5716}
5717
5718void
5719rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5720{
5721 GET_VM()->coverages = coverages;
5722 GET_VM()->me2counter = me2counter;
5723 GET_VM()->coverage_mode = mode;
5724}
5725
5726void
5727rb_resume_coverages(void)
5728{
5729 int mode = GET_VM()->coverage_mode;
5730 VALUE me2counter = GET_VM()->me2counter;
5731 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5732 if (mode & COVERAGE_TARGET_BRANCHES) {
5733 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5734 }
5735 if (mode & COVERAGE_TARGET_METHODS) {
5736 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5737 }
5738}
5739
5740void
5741rb_suspend_coverages(void)
5742{
5743 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5744 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5745 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5746 }
5747 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5748 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5749 }
5750}
5751
5752/* Make coverage arrays empty so old covered files are no longer tracked. */
5753void
5754rb_reset_coverages(void)
5755{
5756 rb_clear_coverages();
5757 rb_iseq_remove_coverage_all();
5758 GET_VM()->coverages = Qfalse;
5759}
5760
5761VALUE
5762rb_default_coverage(int n)
5763{
5764 VALUE coverage = rb_ary_hidden_new_fill(3);
5765 VALUE lines = Qfalse, branches = Qfalse;
5766 int mode = GET_VM()->coverage_mode;
5767
5768 if (mode & COVERAGE_TARGET_LINES) {
5769 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5770 }
5771 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5772
5773 if (mode & COVERAGE_TARGET_BRANCHES) {
5774 branches = rb_ary_hidden_new_fill(2);
5775 /* internal data structures for branch coverage:
5776 *
5777 * { branch base node =>
5778 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5779 * branch target id =>
5780 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5781 * ...
5782 * }],
5783 * ...
5784 * }
5785 *
5786 * Example:
5787 * { NODE_CASE =>
5788 * [1, 0, 4, 3, {
5789 * NODE_WHEN => [2, 8, 2, 9, 0],
5790 * NODE_WHEN => [3, 8, 3, 9, 1],
5791 * ...
5792 * }],
5793 * ...
5794 * }
5795 */
5796 VALUE structure = rb_hash_new();
5797 rb_obj_hide(structure);
5798 RARRAY_ASET(branches, 0, structure);
5799 /* branch execution counters */
5800 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5801 }
5802 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5803
5804 return coverage;
5805}
5806
5807static VALUE
5808uninterruptible_exit(VALUE v)
5809{
5810 rb_thread_t *cur_th = GET_THREAD();
5811 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5812
5813 cur_th->pending_interrupt_queue_checked = 0;
5814 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5815 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5816 }
5817 return Qnil;
5818}
5819
5820VALUE
5821rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5822{
5823 VALUE interrupt_mask = rb_ident_hash_new();
5824 rb_thread_t *cur_th = GET_THREAD();
5825
5826 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5827 OBJ_FREEZE_RAW(interrupt_mask);
5828 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5829
5830 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5831
5832 RUBY_VM_CHECK_INTS(cur_th->ec);
5833 return ret;
5834}
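
/*
 * Usage sketch (illustrative; `critical_body` and `data` are hypothetical):
 * b_proc runs with all interrupts deferred, comparable to
 * Thread.handle_interrupt(Object => :never); deferred interrupts are
 * re-checked on the way out.
 *
 *   static VALUE critical_body(VALUE data) { ...must not be interrupted...; return Qnil; }
 *   VALUE ret = rb_uninterruptible(critical_body, data);
 */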
5835
5836static void
5837thread_specific_storage_alloc(rb_thread_t *th)
5838{
5839 VM_ASSERT(th->specific_storage == NULL);
5840
5841 if (UNLIKELY(specific_key_count > 0)) {
5842 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5843 }
5844}
5845
5846rb_internal_thread_specific_key_t
5847rb_internal_thread_specific_key_create(void)
5848{
5849 rb_vm_t *vm = GET_VM();
5850
5851 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5852 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5853 }
5854 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5855 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5856 }
5857 else {
5858 rb_internal_thread_specific_key_t key = specific_key_count++;
5859
5860 if (key == 0) {
5861 // allocate
5862 rb_ractor_t *cr = GET_RACTOR();
5863 rb_thread_t *th;
5864
5865 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5866 thread_specific_storage_alloc(th);
5867 }
5868 }
5869 return key;
5870 }
5871}
5872
5873// async and native thread safe.
5874void *
5875rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5876{
5877 rb_thread_t *th = DATA_PTR(thread_val);
5878
5879 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5880 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5881 VM_ASSERT(th->specific_storage);
5882
5883 return th->specific_storage[key];
5884}
5885
5886// async and native thread safe.
5887void
5888rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5889{
5890 rb_thread_t *th = DATA_PTR(thread_val);
5891
5892 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5893 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5894 VM_ASSERT(th->specific_storage);
5895
5896 th->specific_storage[key] = data;
5897}
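
/*
 * Usage sketch (illustrative; `my_state` is hypothetical): an extension
 * reserves one key at startup, then attaches native data to each thread.
 *
 *   static rb_internal_thread_specific_key_t key;
 *   key = rb_internal_thread_specific_key_create();
 *
 *   rb_internal_thread_specific_set(thread, key, my_state);
 *   struct my_state *s = rb_internal_thread_specific_get(thread, key);
 */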
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:90
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:315
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:57
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:120
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:58
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
Definition fl_type.h:606
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:970
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2336
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1096
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:879
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:866
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:137
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:136
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define ZALLOC_N
Old name of RB_ZALLOC_N.
Definition memory.h:395
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:296
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:482
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1337
VALUE rb_eIOError
IOError exception.
Definition io.c:178
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1341
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1344
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3779
VALUE rb_eFatal
fatal exception.
Definition error.c:1340
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1342
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:423
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1382
VALUE rb_eException
Mother of all exceptions.
Definition error.c:1336
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:884
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4454
VALUE rb_eSignal
SignalException exception.
Definition error.c:1339
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2058
VALUE rb_cInteger
Integer class.
Definition numeric.c:198
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:215
VALUE rb_cThread
Thread class.
Definition vm.c:524
VALUE rb_cModule
Module class.
Definition object.c:65
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3638
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance of the given class, or of one of its descendants.
Definition object.c:830
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
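As a sketch of how rb_check_arity() is typically used: a C method taking (argc, argv) validates the argument count before touching argv. The method name my_pick is hypothetical:

    /* Accepts one or two arguments; raises ArgumentError otherwise. */
    static VALUE
    my_pick(int argc, VALUE *argv, VALUE self)
    {
        rb_check_arity(argc, 1, 2);
        VALUE a = argv[0];
        VALUE b = (argc > 1) ? argv[1] : Qnil;
        return rb_ary_new_from_args(2, a, b);
    }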
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:807
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1782
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3500
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1435
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from Fiber-local storage.
Definition thread.c:3510
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2703
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2942
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
Definition thread.c:5260
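A minimal sketch of the recursion API: the callback receives the object, an extra argument, and a flag telling whether this object is already being visited on the current call stack. safe_inspect_body and safe_inspect are hypothetical names:

    /* Returns "[...]" instead of looping forever on self-referential data. */
    static VALUE
    safe_inspect_body(VALUE obj, VALUE arg, int recur)
    {
        if (recur) return rb_str_new_cstr("[...]");
        return rb_inspect(obj);
    }

    static VALUE
    safe_inspect(VALUE obj)
    {
        return rb_exec_recursive(safe_inspect_body, obj, Qnil);
    }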
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1371
void rb_thread_fd_close(int fd)
Notifies other threads that a file descriptor is being closed.
Definition thread.c:2644
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1403
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2854
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5271
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
FIXME: the situation of this function is unclear.
Definition thread.c:4735
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1418
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2845
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2798
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5301
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" when our deadlock checker is triggered.
Definition thread.c:1378
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4730
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2921
int rb_thread_alone(void)
Checks if the thread this function is running on is the only thread that is currently alive.
Definition thread.c:3782
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to Fiber-local storage.
Definition thread.c:3658
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1466
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
Definition thread.c:5283
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2807
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
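A sketch combining the Mutex C API entries above; it assumes the calling thread holds the lock when rb_mutex_sleep() is entered (the lock is released while sleeping and reacquired on wakeup). mutex_demo is a hypothetical name:

    static void
    mutex_demo(void)
    {
        VALUE mutex = rb_mutex_new();

        rb_mutex_lock(mutex);
        /* ... critical section ... */
        rb_mutex_sleep(mutex, INT2FIX(1));  /* sleep <= 1s; lock released meanwhile */
        /* the lock is held again here */
        rb_mutex_unlock(mutex);
    }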
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1441
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1943
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2881
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1854
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1340
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:283
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1844
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1095
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:12042
ID rb_to_id(VALUE str)
Definition string.c:12032
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3690
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:179
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Get thread and tool specific data.
Definition thread.c:5874
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Set thread and tool specific data.
Definition thread.c:5887
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Create a key to store thread specific data.
Definition thread.c:5846
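These three functions form a small per-thread storage API aimed at tools such as profilers. A sketch with hypothetical helper names, assuming the declarations live in ruby/thread_native.h; the key must be created once before any set/get:

    #include "ruby/ruby.h"
    #include "ruby/thread_native.h"

    static rb_internal_thread_specific_key_t tool_key;

    static void
    tool_init(void)
    {
        tool_key = rb_internal_thread_specific_key_create();
    }

    static void
    tool_attach(VALUE thread, void *state)
    {
        rb_internal_thread_specific_set(thread, tool_key, state);
    }

    static void *
    tool_state(VALUE thread)
    {
        return rb_internal_thread_specific_get(thread, tool_key);
    }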
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behaviour.
Definition thread.c:1523
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1832
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1651
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
Definition thread.c:1658
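The canonical use of rb_thread_call_without_gvl() is wrapping a blocking system call so other Ruby threads keep running; RUBY_UBF_IO (listed above) is the stock unblocking function for IO-style waits. A sketch assuming a plain read(2); struct read_args, blocking_read and read_nogvl are hypothetical names. The passed function must not call Ruby APIs; use rb_thread_call_with_gvl() to re-enter the VM if needed.

    #include "ruby/thread.h"
    #include <unistd.h>

    struct read_args { int fd; void *buf; size_t len; ssize_t ret; };

    /* Runs with the GVL released; must not touch the Ruby VM. */
    static void *
    blocking_read(void *p)
    {
        struct read_args *a = p;
        a->ret = read(a->fd, a->buf, a->len);
        return NULL;
    }

    static ssize_t
    read_nogvl(int fd, void *buf, size_t len)
    {
        struct read_args a = { fd, buf, len, -1 };
        rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
        return a.ret;
    }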
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1376
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2254
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies an unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:161
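A short sketch of why RB_GC_GUARD matters: after the last visible use of a VALUE, an optimising compiler may treat the variable as dead, letting GC reclaim the object while a borrowed C pointer into it is still in use. The guard pins the VALUE until that point. print_cstr is a hypothetical name:

    #include "ruby/ruby.h"
    #include <stdio.h>

    static void
    print_cstr(void)
    {
        VALUE str = rb_str_new_cstr("hello");
        const char *p = StringValueCStr(str);

        printf("%s\n", p);  /* p borrows str's internal buffer */
        RB_GC_GUARD(str);   /* keeps str (and thus p) alive until at least here */
    }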
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
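The generic-looking signatures of rb_thread_create(), rb_hash_foreach() and rb_ensure() above are Doxygen's rendering; the real callbacks take and return VALUE. A sketch of rb_ensure(), which guarantees a cleanup runs even if the body raises; with_io and its helpers are hypothetical names:

    static VALUE
    with_io_body(VALUE io)
    {
        return rb_yield(io);
    }

    static VALUE
    with_io_ensure(VALUE io)
    {
        return rb_funcall(io, rb_intern("close"), 0);  /* always runs */
    }

    static VALUE
    with_io(VALUE io)
    {
        return rb_ensure(with_io_body, io, with_io_ensure, io);
    }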
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks the contents for viability as a C string.
Definition rstring.h:89
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:515
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:449
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
Definition rtypeddata.h:497
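These macros are the standard way to wrap a C struct in a Ruby object. A self-contained sketch around a trivial counter struct; all names below are hypothetical. counter_alloc would be registered with rb_define_alloc_func() (listed above):

    struct counter { long n; };

    static const rb_data_type_t counter_type = {
        "counter",
        { 0, RUBY_TYPED_DEFAULT_FREE, 0 },  /* no mark; free with ruby_xfree */
        0, 0,
        RUBY_TYPED_FREE_IMMEDIATELY,
    };

    static VALUE
    counter_alloc(VALUE klass)
    {
        struct counter *c;
        VALUE obj = TypedData_Make_Struct(klass, struct counter, &counter_type, c);
        c->n = 0;
        return obj;
    }

    static VALUE
    counter_increment(VALUE self)
    {
        struct counter *c;
        TypedData_Get_Struct(self, struct counter, &counter_type, c);
        return LONG2NUM(++c->n);
    }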
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a Ruby thread.
Definition thread.c:5472
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:219
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex#lock.
Definition scheduler.c:383
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this function.
Definition scheduler.c:180
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:402
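A sketch of how a blocking primitive consults the fiber scheduler, in the spirit of what Thread#join or Mutex#lock do internally: block via the scheduler when one is installed, otherwise fall back to a native sleep. wait_on_blocker is a hypothetical name; the waking side would call rb_fiber_scheduler_unblock() with the same blocker and the suspended fiber.

    #include "ruby/fiber/scheduler.h"

    static VALUE
    wait_on_blocker(VALUE blocker, VALUE timeout)
    {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            /* Non-blocking fiber: the scheduler suspends us and resumes
             * us later via rb_fiber_scheduler_unblock(). */
            return rb_fiber_scheduler_block(scheduler, blocker, timeout);
        }
        /* Blocking fiber: park the whole native thread. */
        rb_thread_sleep_forever();
        return Qnil;
    }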
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4269
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
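Putting the rb_fd_* macros and rb_thread_fd_select() together: a GVL-friendly, large-fd-safe replacement for a one-descriptor select(2). wait_readable is a hypothetical name:

    static int
    wait_readable(int fd)
    {
        rb_fdset_t rfds;
        struct timeval tv = { 1, 0 };       /* one-second timeout */
        int n;

        rb_fd_init(&rfds);
        rb_fd_set(fd, &rfds);
        n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
        if (n > 0 && rb_fd_isset(fd, &rfds)) {
            /* fd is readable */
        }
        rb_fd_term(&rfds);
        return n;
    }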
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
rb_data_type_struct
This is the struct that holds the necessary info for a wrapped C struct.
Definition rtypeddata.h:200
rb_fdset_t
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:135
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:298
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:304
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:286
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:292
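A sketch of the native-lock API for guarding C-level state shared between native threads. Unlike a Ruby Mutex, these locks do not interact with the GVL, so one should never be held across code that can block on Ruby-level synchronisation. The counter names are hypothetical:

    #include "ruby/thread_native.h"

    static rb_nativethread_lock_t counter_lock;
    static long shared_counter;

    static void
    counter_init(void)
    {
        rb_nativethread_lock_initialize(&counter_lock);
    }

    static long
    counter_bump(void)
    {
        long v;
        rb_nativethread_lock_lock(&counter_lock);
        v = ++shared_counter;
        rb_nativethread_lock_unlock(&counter_lock);
        return v;
    }

    static void
    counter_teardown(void)
    {
        rb_nativethread_lock_destroy(&counter_lock);
    }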
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40