	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;
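	/*
	 * Aside (illustrative, not from the patch): q->nr_requests is the
	 * block layer's per-queue request allocation count, BLKDEV_MAX_RQ
	 * (128) by default, and is tunable from userspace, e.g.:
	 *
	 *	echo 256 > /sys/block/sdX/queue/nr_requests
	 *
	 * Reporting it as both hw_queue_depth and queue_depth simply
	 * mirrors whatever the underlying queue is configured with.
	 */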

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
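	/*
	 * Aside (assumed configfs layout, for illustration only): after the
	 * device is registered, these attributes surface under configfs and
	 * can be inspected or toggled from userspace, e.g.:
	 *
	 *	cat /sys/kernel/config/target/core/iblock_0/<dev>/attrib/max_unmap_lba_count
	 *	echo 1 > /sys/kernel/config/target/core/iblock_0/<dev>/attrib/emulate_tpu
	 *
	 * This is what "disabled by default" above refers to: discard is only
	 * advertised to initiators once emulate_tpu (UNMAP) or emulate_tpws
	 * (WRITE_SAME w/ UNMAP) is explicitly enabled.
	 */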
	return blocks_long;
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd)
		transport_complete_sync_cache(cmd, err == 0);
	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);
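	/*
	 * Background (SBC, summarized for illustration): the 0x2 mask above
	 * selects the IMMED bit of SYNCHRONIZE CACHE(10):
	 *
	 *	CDB byte 1:  bit 2 = SYNC_NV, bit 1 = IMMED (0x02), bit 0 = obsolete
	 *
	 * With IMMED set the target must return GOOD status before the flush
	 * actually completes, hence the early completion above.
	 */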

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
}
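/*
 * Illustrative sketch (not part of the driver): a bio allocated with zero
 * iovecs and submitted as WRITE_FLUSH carries no payload, and the block
 * layer turns it into a standalone cache-flush request. The bare pattern,
 * with my_flush_done as a hypothetical completion handler:
 *
 *	struct bio *b = bio_alloc(GFP_KERNEL, 0);
 *	b->bi_bdev = bdev;
 *	b->bi_end_io = my_flush_done;
 *	submit_bio(WRITE_FLUSH, b);
 *
 * Note that when IMMED was set above, bi_private is left NULL so that
 * iblock_end_io_flush() skips the already-sent command completion.
 */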

static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)

static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}

static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	atomic_inc(&ib_req->ib_bio_cnt);

	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}
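/*
 * Design note (sketch, pool size assumed): bios carved from a private
 * bioset must be freed back to that same set, which is why bi_destructor
 * is wired to iblock_bio_destructor() above. The set itself would be
 * created once per device, along the lines of:
 *
 *	ib_dev->ibd_bio_set = bioset_create(32, 0);
 *	if (!ib_dev->ibd_bio_set)
 *		return -ENOMEM;
 *
 * so that bio_put() on the final reference returns the bio to
 * ib_dev->ibd_bio_set rather than to the global bio pool.
 */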

static int iblock_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 i, sg_num = task->task_sg_nents;
	sector_t block_lba;
	struct blk_plug plug;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     task->task_se_cmd->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
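	/*
	 * Illustrative equivalence (not in the driver): for the power-of-two
	 * block sizes TCM accepts, the ladder above reduces to one shift:
	 *
	 *	block_lba = task->task_lba << (ilog2(block_size) - 9);
	 *
	 * The explicit 512/1024/2048/4096 cases simply enumerate the values
	 * the block_size device attribute may take.
	 */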

	bio = iblock_get_bio(task, block_lba, sg_num);
	if (!bio)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(task, block_lba, sg_num);
			if (!bio)
				goto fail;
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;

fail:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
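/*
 * Aside on the submission pattern above (illustrative): queueing every bio
 * on a bio_list first and submitting only inside the plug pair lets the
 * block layer merge and batch the requests before the queue sees them.
 * The bare idiom:
 *
 *	blk_start_plug(&plug);
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(rw, bio);
 *	blk_finish_plug(&plug);    <- flushes the batched bios to the queue
 */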

static u32 iblock_get_device_rev(struct se_device *dev)

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	/*
	 * Wait to complete the task until the last bio has completed.
	 */
	if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) {
		bio_put(bio);
		return;
	}

	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		 task, bio, task->task_lba,
		 (unsigned long long)bio->bi_sector, err);
	/*
	 * bio_put() will call iblock_bio_destructor() to release the bio back
	 * to ib_dev->ibd_bio_set.
	 */
	bio_put(bio);
	/*
	 * Return GOOD status for the task if zero ib_bio_err_cnt exists.
	 */
	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
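/*
 * Note on the completion scheme (sketch): ib_bio_cnt is bumped once per
 * bio in iblock_get_bio(), so the task completes exactly once, on
 * whichever bio finishes last, and a single failed bio fails the whole
 * task via ib_bio_err_cnt. The bare idiom:
 *
 *	atomic_set(&cnt, nr_bios);            (before submission)
 *
 *	if (atomic_dec_and_test(&cnt))        (in each bio's completion)
 *		complete_parent(!errors_seen);
 */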

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,