void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (mutex) {
        qemu_co_mutex_unlock(mutex);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     */
    if (mutex) {
        qemu_co_mutex_lock(mutex);
    }
}
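
/* Illustrative caller pattern (hypothetical names, not part of this file):
 * a CoQueue is normally paired with a CoMutex and the condition is re-checked
 * in a loop, as with a condition variable:
 *
 *     qemu_co_mutex_lock(&lock);
 *     while (!resource_ready) {
 *         qemu_co_queue_wait(&co_queue, &lock);
 *     }
 *     ... use the resource ...
 *     qemu_co_mutex_unlock(&lock);
 */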

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;
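
/* Note: each CoWaitRecord lives on the stack of the coroutine that is about
 * to sleep (see qemu_co_mutex_lock_slowpath below), so queuing a waiter
 * never allocates memory.
 */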

static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}
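
/* How the two lists cooperate: push_waiter() atomically prepends to
 * 'from_push', so that list holds waiters newest-first.  The single consumer
 * drains 'from_push' into 'to_pop' via move_waiters(), reversing it a second
 * time, so pop_waiter() hands out waiters oldest-first (FIFO).
 */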

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}
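
/* CoMutex fields used below (see the CoMutex declaration in coroutine.h):
 * 'locked' counts lockers (0 free, 1 taken, >1 taken with contenders),
 * 'from_push'/'to_pop' hold the CoWaitRecords, 'handoff' and 'sequence'
 * implement the Responsibility Hand-Off protocol, and 'ctx'/'holder' record
 * where and by whom the mutex is held.
 */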

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = atomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves!  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}
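
/* Sketch of one possible hand-off between a contended unlock() and this slow
 * path (hypothetical interleaving; the sequence number 7 is arbitrary):
 *
 *   qemu_co_mutex_unlock()                 qemu_co_mutex_lock_slowpath()
 *   ----------------------                 -----------------------------
 *   atomic_fetch_dec(&locked) returns 2
 *   pop_waiter() -> NULL (not pushed yet)
 *   handoff = 7
 *                                          push_waiter()
 *                                          reads handoff == 7, sees a waiter,
 *                                          cmpxchg(handoff, 7, 0) succeeds:
 *                                          pops a waiter; if it popped itself
 *                                          it owns the mutex and returns,
 *                                          otherwise it wakes that waiter and
 *                                          yields
 *   cmpxchg(handoff, 7, 0) fails,
 *   so unlock() is done
 */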

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (atomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (atomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = atomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}
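
/* Note on the fast path above: a single cmpxchg 0->1 takes the lock when it
 * is free.  The bounded spin (up to 1000 iterations) only pays off while the
 * holder runs in another AioContext; a holder in our own context cannot make
 * progress until we yield, so in that case we break out and go to sleep.
 */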

void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->holder = NULL;
    self->locks_held--;
    if (atomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Pfew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        atomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
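
/* Illustrative usage from a caller's point of view (hypothetical function and
 * data, not part of this file): CoMutex only protects code running in
 * coroutine context, and the critical section is allowed to yield.
 *
 *     static CoMutex table_lock;          // qemu_co_mutex_init() at setup
 *
 *     static void coroutine_fn update_table(Table *t, Entry e)
 *     {
 *         qemu_co_mutex_lock(&table_lock);
 *         table_insert(t, e);             // may yield, e.g. on I/O
 *         qemu_co_mutex_unlock(&table_lock);
 *     }
 */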