Skip to content

Commit c22c5f9

Browse files
Jacob Lacouture authored and HParker committed
use a hint to avoid the optimistic sched lock drop/reacquire
1 parent a26aed9 commit c22c5f9

File tree

2 files changed

+11
-1
lines changed

2 files changed

+11
-1
lines changed

thread_pthread.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -883,7 +883,7 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
883883
}
884884
thread_sched_set_locked(sched, th);
885885

886-
if (sched->runnable_hot_th != NULL) {
886+
if (sched->runnable_hot_th != NULL && sched->runnable_hot_th_waiting) {
887887
VM_ASSERT(sched->runnable_hot_th != th);
888888
// Give the hot thread a chance to preempt, if it's actively spinning.
889889
// On multicore, this reduces the rate of core-switching. On single-core it
@@ -943,6 +943,7 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
943943
// Control transfer to the current thread is now complete. The original thread
944944
// cannot steal control at this point.
945945
sched->runnable_hot_th = NULL;
946+
sched->runnable_hot_th_waiting = 0;
946947

947948
// VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
948949
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
@@ -980,6 +981,13 @@ thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
980981
static void
981982
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
982983
{
984+
// We are reading and writing these sched fields without lock cover, but
985+
// there are no correctness issues resulting from stale cache or delayed writeback.
986+
// When it works, this causes the next-scheduled thread to yield the sched lock
987+
// briefly so that we can grab it if we're still spinning (not descheduled yet).
988+
if (sched->runnable_hot_th == th) {
989+
sched->runnable_hot_th_waiting = 1;
990+
}
983991
thread_sched_lock(sched, th);
984992
{
985993
thread_sched_to_running_common(sched, th);
@@ -1053,6 +1061,7 @@ thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th, b
10531061
native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
10541062
if (!yield_immediately) {
10551063
sched->runnable_hot_th = th;
1064+
sched->runnable_hot_th_waiting = 0;
10561065
}
10571066
thread_sched_wakeup_next_thread(sched, th, false);
10581067
}

thread_pthread.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,7 @@ struct rb_thread_sched {
134134
// thread completes the transfer of control, it can interrupt and resume running.
135135
// The new thread clears this field when it takes control.
136136
struct rb_thread_struct *runnable_hot_th;
137+
int runnable_hot_th_waiting;
137138
bool is_running;
138139
bool is_running_timeslice;
139140
bool enable_mn_threads;

0 commit comments

Comments (0)