 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such
 * as gzipped tar and not one requiring a proprietary or patent
 * encumbered tool to unpack.
 *
 *	Multiple device handling error fixes,
 *	Added a queue depth.
 *	FC920 has an rmw bug. Don't OR in the end marker.
 *	Removed queue walk, fixed for 64bitness.
 *	Rewrote much of the code over time
 *	Added indirect block lists
 *	Handle 64K limits on many controllers
 *	Don't use indirects on the Promise (breaks)
 *	Heavily chop down the queue depths
 *	Independent queues per IOP
 *	Support for dynamic device creation/deletion
 *	Support for larger I/Os through merge* functions
 *	(taken from the DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search for and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda).
 *		Properly attach/detach the I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2o.h>
#include <linux/mutex.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.325"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static DEFINE_MUTEX(i2o_block_mutex);
static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;
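/*
 * For orientation only: a minimal sketch of the pool pair that "i2o_block.h"
 * is expected to provide for i2o_blk_req_pool. The member names mirror how
 * .slab and .pool are used in i2o_block_request_alloc() and i2o_block_init()
 * below; treat the exact layout as an assumption, not a copy of the header.
 *
 *	struct i2o_block_mempool {
 *		struct kmem_cache *slab;	// slab backing each request
 *		mempool_t *pool;		// mempool drawing from that slab
 *	};
 */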
/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
static void i2o_block_device_free(struct i2o_block_device *dev)
	blk_cleanup_queue(dev->gd->queue);

 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Removes the gendisk from the system and frees all allocated memory.
static int i2o_block_remove(struct device *dev)
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_device_flush(struct i2o_device *dev)
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	    cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(60 << 16);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, msg, 60);
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load media into the drive. The identifier should be set to -1, because
 * the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	    cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	msg->body[1] = cpu_to_le32(0x00000000);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);

 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock the media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	    cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);

 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	    cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(media_id);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
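/*
 * Note on the functions above: flush, mount, lock and unlock all build the
 * same five-word message. The second header word carries the block-class
 * command, HOST_TID and the target TID; body[0] carries the media identifier
 * (or, for the cache flush, what appears to be a timeout field in its upper
 * 16 bits); the message is then posted synchronously via i2o_msg_post_wait().
 * Only the command code and the wait time differ between them.
 */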
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation to send
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	    cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
			i2o_dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(op << 24);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, msg, 60);

 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or a negative error code
 * on failure.
static inline struct i2o_block_request *i2o_block_request_alloc(void)
	struct i2o_block_request *ireq;

	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);
	sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
	mempool_free(ireq, i2o_blk_req_pool.pool);
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 * @mptr: message body pointer
 *
 * Builds the SG list and maps it so it is accessible by the controller.
 *
 * Returns 0 on failure or 1 on success.
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);

 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG list should be freed
 *
 * Frees the SG list from the I2O block request.
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");

	/* connect the i2o_block_request to the request */
	ireq = i2o_block_request_alloc();
		osm_debug("unable to allocate i2o_block_request!\n");
		return BLKPREP_DEFER;

	ireq->i2o_blk_dev = i2o_blk_dev;

	/* do not come back here */
	req->cmd_flags |= REQ_DONTPREP;

 * i2o_block_delayed_request_fn - delayed request queue function
 * @work: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
 * be started again.
static void i2o_block_delayed_request_fn(struct work_struct *work)
	struct i2o_block_delayed_request *dreq =
		container_of(work, struct i2o_block_delayed_request,
			     work.work);
	struct request_queue *q = dreq->queue;

	spin_lock_irqsave(q->queue_lock, flags);
	spin_unlock_irqrestore(q->queue_lock, flags);
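/*
 * A sketch of the delayed-request wrapper assumed above (declared in
 * "i2o_block.h"): a delayed_work item bundled with the queue it should
 * restart. The layout shown here is inferred from the container_of() call
 * above and from how dreq->queue and dreq->work are used in this file;
 * treat it as an assumption rather than the authoritative definition.
 *
 *	struct i2o_block_delayed_request {
 *		struct delayed_work work;	// runs i2o_block_delayed_request_fn
 *		struct request_queue *queue;	// queue to restart when it fires
 *	};
 */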
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
static void i2o_block_end_request(struct request *req, int error,
	struct i2o_block_request *ireq = req->special;
	struct i2o_block_device *dev = ireq->i2o_blk_dev;
	struct request_queue *q = req->q;

	if (blk_end_request(req, error, nr_bytes))
		blk_end_request_all(req, -EIO);

	spin_lock_irqsave(q->queue_lock, flags);

	dev->open_queue_depth--;
	list_del(&ireq->queue);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");

 * Let's see what is cooking. We stuffed the
 * request in the context.

	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);

 * Device not ready means two things. One is that the
 * thing went offline (but not removable media).
 * The second is that you have a SuperTrak 100 and the
 * firmware got constipated. Unlike standard I2O card
 * setups the SuperTrak returns an error rather than
 * blocking for the timeout in these cases.
 *
 * Don't stick a SuperTrak 100 into cache-aggressive modes.

		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

	i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));
static void i2o_block_event(struct work_struct *work)
	struct i2o_event *evt = container_of(work, struct i2o_event, work);
	osm_debug("event received\n");

 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size        | Sectors | Heads | Cylinders
 * -----------------+---------+-------+--------------------
 *    1 < X <= 528M |      63 |    16 | X/(63 * 16 * 512)
 * 528M < X <= 1G   |      63 |    32 | X/(63 * 32 * 512)
 *   1G < X <= 21G  |      63 |    64 | X/(63 * 64 * 512)
 *  21G < X <= 42G  |      63 |   128 | X/(63 * 128 * 512)
 *  42G < X <= 84G  |      63 |   255 | X/(63 * 255 * 512)

#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800
static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
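/*
 * Worked example (capacity is in 512-byte sectors, as returned by
 * get_capacity() in i2o_block_getgeo() below): a 1 GiB disk has
 * capacity = 2097152 <= BLOCK_SIZE_1G, so heads = 32 and sectors = 63,
 * giving cylinders = 2097152 / (32 * 63) = 1040 (integer division).
 */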
 * i2o_block_open - Open the block device
 * @bdev: block device being opened
 * @mode: file open mode
 *
 * Power up the device, mount and lock the media. This function is called
 * if the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_open(struct block_device *bdev, fmode_t mode)
	struct i2o_block_device *dev = bdev->bd_disk->private_data;

	mutex_lock(&i2o_block_mutex);
	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");
	mutex_unlock(&i2o_block_mutex);
 * i2o_block_release - Release the I2O block device
 * @disk: gendisk device being released
 * @mode: file open mode
 *
 * Unlock and unmount the media, and power down the device. Gets called if
 * the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_release(struct gendisk *disk, fmode_t mode)
	struct i2o_block_device *dev = disk->private_data;

 * This is to deal with the case of an application
 * opening a device and then the device disappears while
 * it's in use, and then the application tries to release
 * it, e.g. unmounting a deleted RAID volume at reboot.
 * If we send messages, it will just cause FAILs since
 * the TID no longer exists.

	mutex_lock(&i2o_block_mutex);
	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */

	i2o_block_device_power(dev, operation);
	mutex_unlock(&i2o_block_mutex);
static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	i2o_block_biosparam(get_capacity(bdev->bd_disk),
			    &geo->cylinders, &geo->heads, &geo->sectors);

 * i2o_block_ioctl - Issue device-specific ioctl calls.
 * @bdev: block device being opened
 * @mode: file open mode
 * @cmd: ioctl command
 *
 * Handles ioctl requests for the block device.
 *
 * Return 0 on success or negative error on failure.
static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
	struct gendisk *disk = bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))

	mutex_lock(&i2o_block_mutex);

	ret = put_user(dev->rcache, (int __user *)arg);

	ret = put_user(dev->wcache, (int __user *)arg);

	if (arg < 0 || arg > CACHE_SMARTFETCH)

	    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))

	mutex_unlock(&i2o_block_mutex);
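/*
 * For context: this ioctl reads and writes the cache strategies kept in
 * dev->rcache and dev->wcache. The strategies referenced in this file are
 * CACHE_PREFETCH and CACHE_SMARTFETCH on the read side, and
 * CACHE_WRITETHROUGH, CACHE_WRITEBACK, CACHE_SMARTBACK and CACHE_SMARTTHROUGH
 * on the write side. They take effect in i2o_block_transfer() below, where
 * the chosen strategy is translated into the ctl_flags word of the outgoing
 * message.
 */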
 * i2o_block_check_events - Have we seen a media change?
 * @disk: gendisk which should be verified
 * @clearing: events being cleared
 *
 * Verifies if the media has changed.
 *
 * Returns DISK_EVENT_MEDIA_CHANGE if the media was changed, or 0 otherwise.
static unsigned int i2o_block_check_events(struct gendisk *disk,
					   unsigned int clearing)
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return DISK_EVENT_MEDIA_CHANGE;

 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and, after everything is set up, the message is
 * posted to the I2O controller. No cleanup is done by this function; it is
 * done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
static int i2o_block_transfer(struct request *req)
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c;
	struct i2o_message *msg;
	struct i2o_block_request *ireq = req->special;
	u32 sgl_offset = SGL_OFFSET_8;
	u32 ctl_flags = 0x00000000;

	if (unlikely(!dev->i2o_dev)) {
		osm_err("transfer to removed drive\n");

	tid = dev->i2o_dev->lct_data.tid;
	c = dev->i2o_dev->iop;

	msg = i2o_msg_get(c);

	tcntxt = i2o_cntxt_list_add(c, req);

	msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);

	mptr = &msg->body[0];

	if (rq_data_dir(req) == READ) {
		cmd = I2O_CMD_BLOCK_READ << 24;

		switch (dev->rcache) {
		case CACHE_PREFETCH:
			ctl_flags = 0x201F0008;

		case CACHE_SMARTFETCH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x201F0008;
			else
				ctl_flags = 0x001F0000;

		cmd = I2O_CMD_BLOCK_WRITE << 24;

		switch (dev->wcache) {
		case CACHE_WRITETHROUGH:
			ctl_flags = 0x001F0008;

		case CACHE_WRITEBACK:
			ctl_flags = 0x001F0010;

		case CACHE_SMARTBACK:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;

		case CACHE_SMARTTHROUGH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
819
hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
822
sgl_offset = SGL_OFFSET_12;
825
cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
827
*mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
828
*mptr++ = cpu_to_le32(tid);
833
* RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
835
if (rq_data_dir(req) == READ) {
837
scsi_flags = 0x60a0000a;
840
scsi_flags = 0xa0a0000a;
843
*mptr++ = cpu_to_le32(scsi_flags);
845
*((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
846
*((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
848
memcpy(mptr, cmd, 10);
850
*mptr++ = cpu_to_le32(blk_rq_bytes(req));
854
msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
855
*mptr++ = cpu_to_le32(ctl_flags);
856
*mptr++ = cpu_to_le32(blk_rq_bytes(req));
858
cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
860
cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
863
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
869
cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
871
list_add_tail(&ireq->queue, &dev->open_queue);
872
dev->open_queue_depth++;
874
i2o_msg_post(c, msg);
879
i2o_cntxt_list_remove(c, req);
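/*
 * Summary of the message built above for the ordinary (non-Adaptec) path:
 * head[1] carries the block read/write command plus HOST_TID and the target
 * TID, the transaction context points back at the struct request, and the
 * body holds ctl_flags (derived from the cache strategy), the byte count,
 * the byte offset (blk_rq_pos() shifted by KERNEL_SECTOR_SHIFT, split into
 * low and high 32-bit words) and finally the scatter-gather list appended by
 * i2o_block_sglist_alloc(). head[0] is then set to the real message size and
 * SGL offset before the message is posted.
 */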
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and, if no error
 * occurs, dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs, the request is requeued.
static void i2o_block_request_fn(struct request_queue *q)
	while ((req = blk_peek_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blk_start_request(req);

					osm_info("transfer error\n");

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);

			INIT_DELAYED_WORK(&dreq->work,
					  i2o_block_delayed_request_fn);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						I2O_BLOCK_RETRY_TIME))

			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
/* I2O Block device operations definition */
static const struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.compat_ioctl = i2o_block_ioctl,
	.getgeo = i2o_block_getgeo,
	.check_events = i2o_block_check_events,
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue and initialize them as far as possible without additional
 * information.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
static struct i2o_block_device *i2o_block_device_alloc(void)
	struct i2o_block_device *dev;
	struct request_queue *queue;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		osm_err("Insufficient memory to allocate I2O Block disk.\n");

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
		osm_err("Insufficient memory to allocate gendisk.\n");

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
		osm_err("Insufficient memory to allocate request queue.\n");

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify that the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
static int i2o_block_probe(struct device *dev)
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct request_queue *queue;
	static int unit = 0;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_hw_sectors(queue, max_sectors);
	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

 * Ask for the current media data. If that isn't supported
 * then we ask for the device capacity data

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));

		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);

		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		i2o_blk_dev->power = power;

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_device_claim_release(i2o_dev);
/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.probe = i2o_block_probe,
	.remove = i2o_block_remove,
 * i2o_block_init - Block OSM initialization function
 *
 * Allocates the slab and mempool for request structs, registers the
 * i2o_block block device and finally registers the Block OSM with the
 * I2O core.
 *
 * Returns 0 on success or negative error code on failure.
static int __init i2o_block_init(void)
	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");

	i2o_blk_req_pool.pool =
		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					 i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
		osm_err("unable to register block device\n");

	osm_info("registered device at major %d\n", I2O_MAJOR);

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;

	unregister_blkdev(I2O_MAJOR, "i2o_block");

	mempool_destroy(i2o_blk_req_pool.pool);

	kmem_cache_destroy(i2o_blk_req_pool.slab);
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
static void __exit i2o_block_exit(void)
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);