/*
 * Block protocol for I/O error injection
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
/* Mutable error-injection state for one blkdebug instance.
 * Rules fired by debug events update these fields; the I/O path reads them. */
typedef struct BlkdebugVars {
    /* Current state number; rules may be restricted to a given state */
    int state;

    /* If inject_errno != 0, an error is injected for requests */
    int inject_errno;

    /* Decides if all future requests fail (false) or only the next one and
     * after the next request inject_errno is reset to 0 (true) */
    bool inject_once;

    /* Decides if aio_readv/writev fails right away (true) or returns an error
     * return value only in the callback (false) */
    bool inject_immediately;
} BlkdebugVars;
44
typedef struct BDRVBlkdebugState {
46
QLIST_HEAD(list, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
49
typedef struct BlkdebugAIOCB {
50
BlockDriverAIOCB common;
55
static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);
57
static AIOPool blkdebug_aio_pool = {
58
.aiocb_size = sizeof(BlkdebugAIOCB),
59
.cancel = blkdebug_aio_cancel,
67
typedef struct BlkdebugRule {
81
QLIST_ENTRY(BlkdebugRule) next;
84
static QemuOptsList inject_error_opts = {
85
.name = "inject-error",
86
.head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
90
.type = QEMU_OPT_STRING,
94
.type = QEMU_OPT_NUMBER,
98
.type = QEMU_OPT_NUMBER,
102
.type = QEMU_OPT_BOOL,
105
.name = "immediately",
106
.type = QEMU_OPT_BOOL,
108
{ /* end of list */ }
112
static QemuOptsList set_state_opts = {
114
.head = QTAILQ_HEAD_INITIALIZER(set_state_opts.head),
118
.type = QEMU_OPT_STRING,
122
.type = QEMU_OPT_NUMBER,
126
.type = QEMU_OPT_NUMBER,
128
{ /* end of list */ }
132
static QemuOptsList *config_groups[] = {
138
static const char *event_names[BLKDBG_EVENT_MAX] = {
139
[BLKDBG_L1_UPDATE] = "l1_update",
140
[BLKDBG_L1_GROW_ALLOC_TABLE] = "l1_grow.alloc_table",
141
[BLKDBG_L1_GROW_WRITE_TABLE] = "l1_grow.write_table",
142
[BLKDBG_L1_GROW_ACTIVATE_TABLE] = "l1_grow.activate_table",
144
[BLKDBG_L2_LOAD] = "l2_load",
145
[BLKDBG_L2_UPDATE] = "l2_update",
146
[BLKDBG_L2_UPDATE_COMPRESSED] = "l2_update_compressed",
147
[BLKDBG_L2_ALLOC_COW_READ] = "l2_alloc.cow_read",
148
[BLKDBG_L2_ALLOC_WRITE] = "l2_alloc.write",
150
[BLKDBG_READ] = "read",
151
[BLKDBG_READ_AIO] = "read_aio",
152
[BLKDBG_READ_BACKING] = "read_backing",
153
[BLKDBG_READ_BACKING_AIO] = "read_backing_aio",
154
[BLKDBG_READ_COMPRESSED] = "read_compressed",
156
[BLKDBG_WRITE_AIO] = "write_aio",
157
[BLKDBG_WRITE_COMPRESSED] = "write_compressed",
159
[BLKDBG_VMSTATE_LOAD] = "vmstate_load",
160
[BLKDBG_VMSTATE_SAVE] = "vmstate_save",
162
[BLKDBG_COW_READ] = "cow_read",
163
[BLKDBG_COW_WRITE] = "cow_write",
165
[BLKDBG_REFTABLE_LOAD] = "reftable_load",
166
[BLKDBG_REFTABLE_GROW] = "reftable_grow",
168
[BLKDBG_REFBLOCK_LOAD] = "refblock_load",
169
[BLKDBG_REFBLOCK_UPDATE] = "refblock_update",
170
[BLKDBG_REFBLOCK_UPDATE_PART] = "refblock_update_part",
171
[BLKDBG_REFBLOCK_ALLOC] = "refblock_alloc",
172
[BLKDBG_REFBLOCK_ALLOC_HOOKUP] = "refblock_alloc.hookup",
173
[BLKDBG_REFBLOCK_ALLOC_WRITE] = "refblock_alloc.write",
174
[BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS] = "refblock_alloc.write_blocks",
175
[BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE] = "refblock_alloc.write_table",
176
[BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE] = "refblock_alloc.switch_table",
178
[BLKDBG_CLUSTER_ALLOC] = "cluster_alloc",
179
[BLKDBG_CLUSTER_ALLOC_BYTES] = "cluster_alloc_bytes",
180
[BLKDBG_CLUSTER_FREE] = "cluster_free",
183
static int get_event_by_name(const char *name, BlkDebugEvent *event)
187
for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
188
if (!strcmp(event_names[i], name)) {
197
struct add_rule_data {
198
BDRVBlkdebugState *s;
202
static int add_rule(QemuOpts *opts, void *opaque)
204
struct add_rule_data *d = opaque;
205
BDRVBlkdebugState *s = d->s;
206
const char* event_name;
208
struct BlkdebugRule *rule;
210
/* Find the right event for the rule */
211
event_name = qemu_opt_get(opts, "event");
212
if (!event_name || get_event_by_name(event_name, &event) < 0) {
216
/* Set attributes common for all actions */
217
rule = g_malloc0(sizeof(*rule));
218
*rule = (struct BlkdebugRule) {
221
.state = qemu_opt_get_number(opts, "state", 0),
224
/* Parse action-specific options */
226
case ACTION_INJECT_ERROR:
227
rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
228
rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0);
229
rule->options.inject.immediately =
230
qemu_opt_get_bool(opts, "immediately", 0);
233
case ACTION_SET_STATE:
234
rule->options.set_state.new_state =
235
qemu_opt_get_number(opts, "new_state", 0);
240
QLIST_INSERT_HEAD(&s->rules[event], rule, next);
245
static int read_config(BDRVBlkdebugState *s, const char *filename)
249
struct add_rule_data d;
251
f = fopen(filename, "r");
256
ret = qemu_config_parse(f, config_groups, filename);
262
d.action = ACTION_INJECT_ERROR;
263
qemu_opts_foreach(&inject_error_opts, add_rule, &d, 0);
265
d.action = ACTION_SET_STATE;
266
qemu_opts_foreach(&set_state_opts, add_rule, &d, 0);
270
qemu_opts_reset(&inject_error_opts);
271
qemu_opts_reset(&set_state_opts);
276
/* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
277
static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags)
279
BDRVBlkdebugState *s = bs->opaque;
283
/* Parse the blkdebug: prefix */
284
if (strncmp(filename, "blkdebug:", strlen("blkdebug:"))) {
287
filename += strlen("blkdebug:");
289
/* Read rules from config file */
290
c = strchr(filename, ':');
295
config = strdup(filename);
296
config[c - filename] = '\0';
297
ret = read_config(s, config);
304
/* Set initial state */
307
/* Open the backing file */
308
ret = bdrv_file_open(&bs->file, filename, flags);
316
static void error_callback_bh(void *opaque)
318
struct BlkdebugAIOCB *acb = opaque;
319
qemu_bh_delete(acb->bh);
320
acb->common.cb(acb->common.opaque, acb->ret);
321
qemu_aio_release(acb);
324
static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
326
BlkdebugAIOCB *acb = container_of(blockacb, BlkdebugAIOCB, common);
327
qemu_aio_release(acb);
330
static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
331
BlockDriverCompletionFunc *cb, void *opaque)
333
BDRVBlkdebugState *s = bs->opaque;
334
int error = s->vars.inject_errno;
335
struct BlkdebugAIOCB *acb;
338
if (s->vars.inject_once) {
339
s->vars.inject_errno = 0;
342
if (s->vars.inject_immediately) {
346
acb = qemu_aio_get(&blkdebug_aio_pool, bs, cb, opaque);
349
bh = qemu_bh_new(error_callback_bh, acb);
351
qemu_bh_schedule(bh);
356
static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
357
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
358
BlockDriverCompletionFunc *cb, void *opaque)
360
BDRVBlkdebugState *s = bs->opaque;
362
if (s->vars.inject_errno) {
363
return inject_error(bs, cb, opaque);
366
BlockDriverAIOCB *acb =
367
bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
371
static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
372
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
373
BlockDriverCompletionFunc *cb, void *opaque)
375
BDRVBlkdebugState *s = bs->opaque;
377
if (s->vars.inject_errno) {
378
return inject_error(bs, cb, opaque);
381
BlockDriverAIOCB *acb =
382
bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
386
static void blkdebug_close(BlockDriverState *bs)
388
BDRVBlkdebugState *s = bs->opaque;
389
BlkdebugRule *rule, *next;
392
for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
393
QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
394
QLIST_REMOVE(rule, next);
400
static BlockDriverAIOCB *blkdebug_aio_flush(BlockDriverState *bs,
401
BlockDriverCompletionFunc *cb, void *opaque)
403
return bdrv_aio_flush(bs->file, cb, opaque);
406
static void process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
407
BlkdebugVars *old_vars)
409
BDRVBlkdebugState *s = bs->opaque;
410
BlkdebugVars *vars = &s->vars;
412
/* Only process rules for the current state */
413
if (rule->state && rule->state != old_vars->state) {
417
/* Take the action */
418
switch (rule->action) {
419
case ACTION_INJECT_ERROR:
420
vars->inject_errno = rule->options.inject.error;
421
vars->inject_once = rule->options.inject.once;
422
vars->inject_immediately = rule->options.inject.immediately;
425
case ACTION_SET_STATE:
426
vars->state = rule->options.set_state.new_state;
431
static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
433
BDRVBlkdebugState *s = bs->opaque;
434
struct BlkdebugRule *rule;
435
BlkdebugVars old_vars = s->vars;
437
assert((int)event >= 0 && event < BLKDBG_EVENT_MAX);
439
QLIST_FOREACH(rule, &s->rules[event], next) {
440
process_rule(bs, rule, &old_vars);
444
static BlockDriver bdrv_blkdebug = {
445
.format_name = "blkdebug",
446
.protocol_name = "blkdebug",
448
.instance_size = sizeof(BDRVBlkdebugState),
450
.bdrv_file_open = blkdebug_open,
451
.bdrv_close = blkdebug_close,
453
.bdrv_aio_readv = blkdebug_aio_readv,
454
.bdrv_aio_writev = blkdebug_aio_writev,
455
.bdrv_aio_flush = blkdebug_aio_flush,
457
.bdrv_debug_event = blkdebug_debug_event,
460
static void bdrv_blkdebug_init(void)
462
bdrv_register(&bdrv_blkdebug);
465
block_init(bdrv_blkdebug_init);