/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    bh->scheduled = 1;
    bh->deleted = 1;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    aio_notify(ctx);
}

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

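/* Usage sketch (illustrative, not part of the original file; my_cb and
 * my_state are hypothetical names):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;        // runs in ctx's event loop thread
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);           // safe to call from any thread
 *     ...
 *     qemu_bh_delete(bh);             // actual free happens in aio_bh_poll
 */
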
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* aio_bh_poll cannot be called concurrently with itself on the same
 * AioContext.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs don't count as progress */
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}

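/* Ordering sketch (illustrative): the atomic_xchg above and the one in
 * aio_bh_poll act as a barrier pair, so writes made before scheduling are
 * visible to the callback:
 *
 *     scheduling thread:  s->value = 42;
 *                         qemu_bh_schedule(bh);            // xchg + notify
 *     event loop thread:  atomic_xchg(&bh->scheduled, 0);  // in aio_bh_poll
 *                         bh->cb(bh->opaque);              // sees s->value
 */
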
/* This function is asynchronous: the cancellation takes effect the next
 * time aio_bh_poll runs; a callback that is already executing is not
 * interrupted.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is asynchronous: the bottom half is only unlinked and
 * freed later, by aio_bh_poll, once it is no longer scheduled.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

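/* Worked example (illustrative): with only an idle BH pending, the timeout
 * is 10000000 ns (10ms).  If a timer additionally expires in 3ms,
 * qemu_soonest_timeout(10000000, 3000000) returns 3000000, so the loop
 * blocks for at most 3ms.  Any non-idle scheduled BH forces 0, i.e. poll
 * without blocking at all.
 */
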
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

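/* Usage sketch (illustrative): the GSource lets a GLib main loop drive this
 * AioContext; the reference taken above belongs to the caller, and
 * g_source_attach adds its own:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 */
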
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

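/* Usage sketch (illustrative; my_worker, my_complete and do_blocking_io are
 * hypothetical): the pool runs blocking work in worker threads and invokes
 * the completion back in this AioContext:
 *
 *     static int my_worker(void *arg)       // runs in a pool thread
 *     {
 *         return do_blocking_io(arg);
 *     }
 *
 *     ThreadPool *pool = aio_get_thread_pool(ctx);
 *     thread_pool_submit_aio(pool, my_worker, arg, my_complete, opaque);
 */
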
#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init();
        laio_attach_aio_context(ctx->linux_aio, ctx);
    }
    return ctx->linux_aio;
}
#endif

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}

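/* Pairing sketch (illustrative): aio_notify kicks the event notifier so a
 * blocked poll wakes up; aio_notify_accept consumes that kick once the
 * wakeup has been acted upon:
 *
 *     thread A:  qemu_bh_schedule(bh);   // ends up in aio_notify(ctx)
 *     thread B:  aio_ctx_check(...);     // calls aio_notify_accept(ctx)
 */
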
static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

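/* Usage sketch (illustrative): callers check for NULL and propagate the
 * error in the usual QAPI style:
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_propagate(errp, local_err);
 *     }
 */
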
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

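/* Usage sketch (illustrative): acquire/release bracket any access to state
 * owned by the context; the recursive mutex allows nested acquisition from
 * the same thread:
 *
 *     aio_context_acquire(ctx);
 *     ...                         // e.g. run aio_poll(ctx, true)
 *     aio_context_release(ctx);
 */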