~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise-security

Viewing changes to drivers/target/target_core_iblock.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati
  • Date: 2011-12-06 15:56:07 UTC
  • Revision ID: package-import@ubuntu.com-20111206155607-pcf44kv5fmhk564f
Tags: 3.2.0-1401.1
[ Paolo Pisati ]

* Rebased on top of Ubuntu-3.2.0-3.8
* Tilt-tracking @ ef2487af4bb15bdd0689631774b5a5e3a59f74e2
* Delete debian.ti-omap4/control, it shouldn't be tracked
* Fix architecture spelling (s/armel/armhf/)
* [Config] Update configs following 3.2 import
* [Config] Fix compilation: disable CODA and ARCH_OMAP3
* [Config] Fix compilation: disable Ethernet Faraday
* Update series to precise
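
The [Config] items above amount to switching the corresponding Kconfig symbols off in the imported 3.2 configuration files. A minimal sketch of the resulting config fragment, assuming the usual symbol names for the Coda filesystem, OMAP3 board support and the Faraday Ethernet drivers (the changelog does not list the exact symbols touched):

  # CONFIG_CODA_FS is not set
  # CONFIG_ARCH_OMAP3 is not set
  # CONFIG_NET_VENDOR_FARADAY is not set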

@@ -27,7 +27,6 @@
  *
  ******************************************************************************/
 
-#include <linux/version.h>
 #include <linux/string.h>
 #include <linux/parser.h>
 #include <linux/timer.h>
@@ -38,6 +37,7 @@
 #include <linux/bio.h>
 #include <linux/genhd.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 
@@ -47,12 +47,6 @@
 
 #include "target_core_iblock.h"
 
-#if 0
-#define DEBUG_IBLOCK(x...) printk(x)
-#else
-#define DEBUG_IBLOCK(x...)
-#endif
-
 static struct se_subsystem_api iblock_template;
 
 static void iblock_bio_done(struct bio *, int);
@@ -66,25 +60,22 @@
 	struct iblock_hba *ib_host;
 
 	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
-	if (!(ib_host)) {
-		printk(KERN_ERR "Unable to allocate memory for"
+	if (!ib_host) {
+		pr_err("Unable to allocate memory for"
 				" struct iblock_hba\n");
 		return -ENOMEM;
 	}
 
 	ib_host->iblock_host_id = host_id;
 
-	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
-	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
-	hba->hba_ptr = (void *) ib_host;
+	hba->hba_ptr = ib_host;
 
-	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
 		" Generic Target Core Stack %s\n", hba->hba_id,
 		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
 
-	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
-		" Target Core TCQ Depth: %d\n", hba->hba_id,
-		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
+		hba->hba_id, ib_host->iblock_host_id);
 
 	return 0;
 }
@@ -93,7 +84,7 @@
 {
 	struct iblock_hba *ib_host = hba->hba_ptr;
 
-	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
 		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
 
 	kfree(ib_host);
@@ -106,13 +97,13 @@
 	struct iblock_hba *ib_host = hba->hba_ptr;
 
 	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
-	if (!(ib_dev)) {
-		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+	if (!ib_dev) {
+		pr_err("Unable to allocate struct iblock_dev\n");
 		return NULL;
 	}
 	ib_dev->ibd_host = ib_host;
 
-	printk(KERN_INFO  "IBLOCK: Allocated ib_dev for %s\n", name);
+	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
 
 	return ib_dev;
 }
@@ -131,8 +122,8 @@
 	u32 dev_flags = 0;
 	int ret = -EINVAL;
 
-	if (!(ib_dev)) {
-		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+	if (!ib_dev) {
+		pr_err("Unable to locate struct iblock_dev parameter\n");
 		return ERR_PTR(ret);
 	}
 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -140,16 +131,16 @@
 	 * These settings need to be made tunable..
 	 */
 	ib_dev->ibd_bio_set = bioset_create(32, 64);
-	if (!(ib_dev->ibd_bio_set)) {
-		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+	if (!ib_dev->ibd_bio_set) {
+		pr_err("IBLOCK: Unable to create bioset()\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+	pr_debug("IBLOCK: Created bio_set()\n");
 	/*
 	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
 	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
 	 */
-	printk(KERN_INFO  "IBLOCK: Claiming struct block_device: %s\n",
+	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
 			ib_dev->ibd_udev_path);
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -167,42 +158,41 @@
 	limits->logical_block_size = bdev_logical_block_size(bd);
 	limits->max_hw_sectors = queue_max_hw_sectors(q);
 	limits->max_sectors = queue_max_sectors(q);
-	dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
-	dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+	dev_limits.hw_queue_depth = q->nr_requests;
+	dev_limits.queue_depth = q->nr_requests;
 
-	ib_dev->ibd_major = MAJOR(bd->bd_dev);
-	ib_dev->ibd_minor = MINOR(bd->bd_dev);
 	ib_dev->ibd_bd = bd;
 
 	dev = transport_add_device_to_core_hba(hba,
-			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
+			&iblock_template, se_dev, dev_flags, ib_dev,
 			&dev_limits, "IBLOCK", IBLOCK_VERSION);
-	if (!(dev))
+	if (!dev)
 		goto failed;
 
-	ib_dev->ibd_depth = dev->queue_depth;
-
 	/*
 	 * Check if the underlying struct block_device request_queue supports
 	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 	 * in ATA and we need to set TPE=1
 	 */
 	if (blk_queue_discard(q)) {
-		DEV_ATTRIB(dev)->max_unmap_lba_count =
+		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
 				q->limits.max_discard_sectors;
 		/*
 		 * Currently hardcoded to 1 in Linux/SCSI code..
 		 */
-		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
-		DEV_ATTRIB(dev)->unmap_granularity =
+		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
+		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
 				q->limits.discard_granularity;
-		DEV_ATTRIB(dev)->unmap_granularity_alignment =
+		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
 				q->limits.discard_alignment;
 
-		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+		pr_debug("IBLOCK: BLOCK Discard support available,"
 				" disabled by default\n");
 	}
 
+	if (blk_queue_nonrot(q))
+		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+
 	return dev;
 
 failed:
@@ -211,8 +201,6 @@
 		ib_dev->ibd_bio_set = NULL;
 	}
 	ib_dev->ibd_bd = NULL;
-	ib_dev->ibd_major = 0;
-	ib_dev->ibd_minor = 0;
 	return ERR_PTR(ret);
 }
 
@@ -233,17 +221,16 @@
 }
 
 static struct se_task *
-iblock_alloc_task(struct se_cmd *cmd)
+iblock_alloc_task(unsigned char *cdb)
 {
 	struct iblock_req *ib_req;
 
 	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
-	if (!(ib_req)) {
-		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+	if (!ib_req) {
+		pr_err("Unable to allocate memory for struct iblock_req\n");
 		return NULL;
 	}
 
-	ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
 	atomic_set(&ib_req->ib_bio_cnt, 0);
 	return &ib_req->ib_task;
 }
@@ -257,12 +244,12 @@
 					bdev_logical_block_size(bd)) - 1);
 	u32 block_size = bdev_logical_block_size(bd);
 
-	if (block_size == DEV_ATTRIB(dev)->block_size)
+	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
 		return blocks_long;
 
 	switch (block_size) {
 	case 4096:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
 		case 2048:
 			blocks_long <<= 1;
 			break;
@@ -276,7 +263,7 @@
 		}
 		break;
 	case 2048:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 1;
 			break;
@@ -291,7 +278,7 @@
 		}
 		break;
 	case 1024:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 2;
 			break;
@@ -306,7 +293,7 @@
 		}
 		break;
 	case 512:
-		switch (DEV_ATTRIB(dev)->block_size) {
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 3;
 			break;
@@ -327,103 +314,42 @@
 	return blocks_long;
 }
 
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (err)
+		pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+	if (cmd)
+		transport_complete_sync_cache(cmd, err == 0);
+	bio_put(bio);
+}
+
 /*
- * Emulate SYCHRONIZE_CACHE_*
+ * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
  */
 static void iblock_emulate_sync_cache(struct se_task *task)
 {
-	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
-	sector_t error_sector;
-	int ret;
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	struct bio *bio;
 
 	/*
 	 * If the Immediate bit is set, queue up the GOOD response
-	 * for this SYNCHRONIZE_CACHE op
+	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
 		transport_complete_sync_cache(cmd, 1);
 
-	/*
-	 * blkdev_issue_flush() does not support a specifying a range, so
-	 * we have to flush the entire cache.
-	 */
-	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
-	if (ret != 0) {
-		printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
-			" error_sector: %llu\n", ret,
-			(unsigned long long)error_sector);
-	}
-
+	bio = bio_alloc(GFP_KERNEL, 0);
+	bio->bi_end_io = iblock_end_io_flush;
+	bio->bi_bdev = ib_dev->ibd_bd;
 	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
-}
-
-/*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int iblock_emulated_write_cache(struct se_device *dev)
-{
-	return 1;
-}
-
-static int iblock_emulated_dpo(struct se_device *dev)
-{
-	return 0;
-}
-
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int iblock_emulated_fua_write(struct se_device *dev)
-{
-	return 1;
-}
-
-static int iblock_emulated_fua_read(struct se_device *dev)
-{
-	return 0;
-}
-
-static int iblock_do_task(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	struct iblock_req *req = IBLOCK_REQ(task);
-	struct bio *bio = req->ib_bio, *nbio = NULL;
-	struct blk_plug plug;
-	int rw;
-
-	if (task->task_data_direction == DMA_TO_DEVICE) {
-		/*
-		 * Force data to disk if we pretend to not have a volatile
-		 * write cache, or the initiator set the Force Unit Access bit.
-		 */
-		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
-		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
-		     T_TASK(task->task_se_cmd)->t_tasks_fua))
-			rw = WRITE_FUA;
-		else
-			rw = WRITE;
-	} else {
-		rw = READ;
-	}
-
-	blk_start_plug(&plug);
-	while (bio) {
-		nbio = bio->bi_next;
-		bio->bi_next = NULL;
-		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
-			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
-
-		submit_bio(rw, bio);
-		bio = nbio;
-	}
-	blk_finish_plug(&plug);
-
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+		bio->bi_private = cmd;
+	submit_bio(WRITE_FLUSH, bio);
 }
 
 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
@@ -437,20 +363,7 @@
 
 static void iblock_free_task(struct se_task *task)
 {
-	struct iblock_req *req = IBLOCK_REQ(task);
-	struct bio *bio, *hbio = req->ib_bio;
-	/*
-	 * We only release the bio(s) here if iblock_bio_done() has not called
-	 * bio_put() -> iblock_bio_destructor().
-	 */
-	while (hbio != NULL) {
-		bio = hbio;
-		hbio = hbio->bi_next;
-		bio->bi_next = NULL;
-		bio_put(bio);
-	}
-
-	kfree(req);
+	kfree(IBLOCK_REQ(task));
 }
 
 enum {
@@ -470,7 +383,7 @@
 	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
-	int ret = 0, arg, token;
+	int ret = 0, token;
 
 	opts = kstrdup(page, GFP_KERNEL);
 	if (!opts)
@@ -486,7 +399,7 @@
 		switch (token) {
 		case Opt_udev_path:
 			if (ib_dev->ibd_bd) {
-				printk(KERN_ERR "Unable to set udev_path= while"
+				pr_err("Unable to set udev_path= while"
 					" ib_dev->ibd_bd exists\n");
 				ret = -EEXIST;
 				goto out;
@@ -499,15 +412,11 @@
 			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
 					"%s", arg_p);
 			kfree(arg_p);
-			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
 					ib_dev->ibd_udev_path);
 			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
 			break;
 		case Opt_force:
-			match_int(args, &arg);
-			ib_dev->ibd_force = arg;
-			printk(KERN_INFO "IBLOCK: Set force=%d\n",
-				ib_dev->ibd_force);
 			break;
 		default:
 			break;
@@ -526,8 +435,8 @@
 	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
 
 	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
-		return -1;
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
 	}
 
 	return 0;
@@ -555,12 +464,11 @@
 	bl += sprintf(b + bl, "        ");
 	if (bd) {
 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
-			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
 			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
 			"CLAIMED: IBLOCK" : "CLAIMED: OS");
 	} else {
-		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
-			ibd->ibd_major, ibd->ibd_minor);
+		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
 	}
 
 	return bl;
@@ -569,136 +477,123 @@
 static void iblock_bio_destructor(struct bio *bio)
 {
 	struct se_task *task = bio->bi_private;
-	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
 
 	bio_free(bio, ib_dev->ibd_bio_set);
 }
 
-static struct bio *iblock_get_bio(
-	struct se_task *task,
-	struct iblock_req *ib_req,
-	struct iblock_dev *ib_dev,
-	int *ret,
-	sector_t lba,
-	u32 sg_num)
+static struct bio *
+iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
 {
+	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+	struct iblock_req *ib_req = IBLOCK_REQ(task);
 	struct bio *bio;
 
 	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-	if (!(bio)) {
-		printk(KERN_ERR "Unable to allocate memory for bio\n");
-		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
		return NULL;
 	}
 
-	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
-		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
-	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
+		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
+	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
 
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = (void *) task;
+	bio->bi_private = task;
 	bio->bi_destructor = iblock_bio_destructor;
 	bio->bi_end_io = &iblock_bio_done;
 	bio->bi_sector = lba;
 	atomic_inc(&ib_req->ib_bio_cnt);
 
-	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
-	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
+	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
 			atomic_read(&ib_req->ib_bio_cnt));
 	return bio;
 }
 
-static int iblock_map_task_SG(struct se_task *task)
+static int iblock_do_task(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = SE_DEV(cmd);
-	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
-	struct iblock_req *ib_req = IBLOCK_REQ(task);
-	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct se_device *dev = cmd->se_dev;
+	struct bio *bio;
+	struct bio_list list;
 	struct scatterlist *sg;
-	int ret = 0;
-	u32 i, sg_num = task->task_sg_num;
+	u32 i, sg_num = task->task_sg_nents;
 	sector_t block_lba;
+	struct blk_plug plug;
+	int rw;
+
+	if (task->task_data_direction == DMA_TO_DEVICE) {
+		/*
+		 * Force data to disk if we pretend to not have a volatile
+		 * write cache, or the initiator set the Force Unit Access bit.
+		 */
+		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		     task->task_se_cmd->t_tasks_fua))
+			rw = WRITE_FUA;
+		else
+			rw = WRITE;
+	} else {
+		rw = READ;
+	}
+
 	/*
 	 * Do starting conversion up from non 512-byte blocksize with
 	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
 	 */
-	if (DEV_ATTRIB(dev)->block_size == 4096)
+	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
 		block_lba = (task->task_lba << 3);
-	else if (DEV_ATTRIB(dev)->block_size == 2048)
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
 		block_lba = (task->task_lba << 2);
-	else if (DEV_ATTRIB(dev)->block_size == 1024)
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
 		block_lba = (task->task_lba << 1);
-	else if (DEV_ATTRIB(dev)->block_size == 512)
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
 		block_lba = task->task_lba;
 	else {
-		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
-				" %u\n", DEV_ATTRIB(dev)->block_size);
+		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
+				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 
-	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
-	if (!(bio))
-		return ret;
-
-	ib_req->ib_bio = bio;
-	hbio = tbio = bio;
-	/*
-	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
-	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
-	 */
-	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
-		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
-			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
-				sg->length, sg->offset);
-again:
-		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
-		if (ret != sg->length) {
-
-			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
-					bio->bi_sector);
-			DEBUG_IBLOCK("** task->task_size: %u\n",
-					task->task_size);
-			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
-					bio->bi_max_vecs);
-			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
-					bio->bi_vcnt);
-
-			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
-						block_lba, sg_num);
-			if (!(bio))
+	bio = iblock_get_bio(task, block_lba, sg_num);
+	if (!bio)
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		/*
+		 * XXX: if the length the device accepts is shorter than the
+		 *	length of the S/G list entry this will cause and
+		 *	endless loop.  Better hope no driver uses huge pages.
+		 */
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+			bio = iblock_get_bio(task, block_lba, sg_num);
+			if (!bio)
 				goto fail;
+			bio_list_add(&list, bio);
+		}
 
-			tbio = tbio->bi_next = bio;
-			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
-				" list, Going to again\n", bio);
-			goto again;
-		}
 		/* Always in 512 byte units for Linux/Block */
 		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
 		sg_num--;
-		DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
-			" sg_num to %u\n", task, sg_num);
-		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
-				" to %llu\n", task, block_lba);
-		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
-				" %u\n", task, bio->bi_vcnt);
 	}
 
-	return 0;
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(&list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
 fail:
-	while (hbio) {
-		bio = hbio;
-		hbio = hbio->bi_next;
-		bio->bi_next = NULL;
+	while ((bio = bio_list_pop(&list)))
 		bio_put(bio);
-	}
-	return ret;
-}
-
-static unsigned char *iblock_get_cdb(struct se_task *task)
-{
-	return IBLOCK_REQ(task)->ib_scsi_cdb;
+	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
@@ -724,64 +619,46 @@
 {
 	struct se_task *task = bio->bi_private;
 	struct iblock_req *ibr = IBLOCK_REQ(task);
+
 	/*
 	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
 	 */
-	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
 		err = -EIO;
 
 	if (err != 0) {
-		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
 			" err: %d\n", bio, err);
 		/*
 		 * Bump the ib_bio_err_cnt and release bio.
 		 */
 		atomic_inc(&ibr->ib_bio_err_cnt);
 		smp_mb__after_atomic_inc();
-		bio_put(bio);
-		/*
-		 * Wait to complete the task until the last bio as completed.
-		 */
-		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
-			return;
-
-		ibr->ib_bio = NULL;
-		transport_complete_task(task, 0);
-		return;
 	}
-	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-		task, bio, task->task_lba, bio->bi_sector, err);
-	/*
-	 * bio_put() will call iblock_bio_destructor() to release the bio back
-	 * to ibr->ib_bio_set.
-	 */
+
 	bio_put(bio);
-	/*
-	 * Wait to complete the task until the last bio as completed.
-	 */
-	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+
+	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
 		return;
-	/*
-	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
-	 */
-	ibr->ib_bio = NULL;
-	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+
+	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		 task, bio, task->task_lba,
+		 (unsigned long long)bio->bi_sector, err);
+
+	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
 }
 
 static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
-	.map_task_SG		= iblock_map_task_SG,
+	.write_cache_emulated	= 1,
+	.fua_write_emulated	= 1,
 	.attach_hba		= iblock_attach_hba,
 	.detach_hba		= iblock_detach_hba,
 	.allocate_virtdevice	= iblock_allocate_virtdevice,
 	.create_virtdevice	= iblock_create_virtdevice,
 	.free_device		= iblock_free_device,
-	.dpo_emulated		= iblock_emulated_dpo,
-	.fua_write_emulated	= iblock_emulated_fua_write,
-	.fua_read_emulated	= iblock_emulated_fua_read,
-	.write_cache_emulated	= iblock_emulated_write_cache,
	.alloc_task		= iblock_alloc_task,
 	.do_task		= iblock_do_task,
 	.do_discard		= iblock_do_discard,
@@ -790,7 +667,6 @@
 	.check_configfs_dev_params = iblock_check_configfs_dev_params,
 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
-	.get_cdb		= iblock_get_cdb,
 	.get_device_rev		= iblock_get_device_rev,
 	.get_device_type	= iblock_get_device_type,
 	.get_blocks		= iblock_get_blocks,
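
For readability, this is how iblock_emulate_sync_cache() reads once the hunks above are applied (assembled from the context and added lines shown; the synchronous blkdev_issue_flush() call is replaced by an empty WRITE_FLUSH bio that is completed asynchronously from iblock_end_io_flush()):

static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/* Zero-segment bio: it carries only the flush request, no data pages. */
	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
}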