/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */
39
/* Mid-level driver identification strings (reported via /proc and logging). */
#define REVISION	"Revision: 1.00"
#define VERSION		"Id: scsi.c 1.00 2000/09/26"
42
#include <xeno/config.h>
43
#include <xeno/module.h>
45
#include <xeno/sched.h>
46
#include <xeno/timer.h>
48
#include <xeno/slab.h>
49
#include <xeno/ioport.h>
50
/*#include <xeno/stat.h>*/
52
#include <xeno/interrupt.h>
53
#include <xeno/delay.h>
54
#include <xeno/init.h>
55
/*#include <xeno/smp_lock.h>*/
56
/*#include <xeno/completion.h>*/
58
/* for xeno scsi_probe() stuff... maybe punt somewhere else? */
59
#include <hypervisor-ifs/block.h>
60
#include <xeno/blkdev.h>
62
#define __KERNEL_SYSCALLS__
64
/*#include <xeno/unistd.h>*/
65
#include <xeno/spinlock.h>
67
#include <asm/system.h>
70
#include <asm/uaccess.h>
74
#include "constants.h"
77
#include <xeno/kmod.h>
80
#undef USE_STATIC_SCSI_MEMORY
82
struct proc_dir_entry *proc_scsi;
85
static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
86
static void scsi_dump_status(int level);
90
static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
94
* Definitions and constants.
97
/*
 * After a bus/host reset, wait this long before issuing new commands,
 * so the drive has a chance to become ready again.
 */
#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)
103
* Macro to determine the size of SCSI command. This macro takes vendor
104
* unique commands into account. SCSI commands in groups 6 and 7 are
105
* vendor unique and we will depend upon the command length being
106
* supplied correctly in cmd_len.
108
/*
 * Macro to determine the size of a SCSI command.  This macro takes vendor
 * unique commands into account: SCSI commands in groups 6 and 7 are
 * vendor unique, so for those we depend upon the command length being
 * supplied correctly in cmd_len.  For all other groups the length is
 * derived from the opcode via COMMAND_SIZE().
 */
#define CDB_SIZE(SCpnt)	((((SCpnt->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE(SCpnt->cmnd[0]) : SCpnt->cmd_len)
114
unsigned long scsi_pid;
115
Scsi_Cmnd *last_cmnd;
116
/* Command group 3 is reserved and should never be used. */
117
const unsigned char scsi_command_size[8] =
122
static unsigned long serial_number;
123
static Scsi_Cmnd *scsi_bh_queue_head;
124
static Scsi_Cmnd *scsi_bh_queue_tail;
127
* Note - the initial logging level can be set here to log events at boot time.
128
* After the system is up, you may enable logging via the /proc interface.
130
unsigned int scsi_logging_level;
132
const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
151
* Function prototypes.
153
extern void scsi_times_out(Scsi_Cmnd * SCpnt);
154
void scsi_build_commandblocks(Scsi_Device * SDpnt);
157
* These are the interface to the old error handling code. It should go away
160
extern void scsi_old_done(Scsi_Cmnd * SCpnt);
161
extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
162
extern int scsi_old_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
165
* Private interface into the new error handling code.
167
extern int scsi_new_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
170
* Function: scsi_initialize_queue()
172
* Purpose: Selects queue handler function for a device.
174
* Arguments: SDpnt - device for which we need a handler function.
178
* Lock status: No locking assumed or required.
180
* Notes: Most devices will end up using scsi_request_fn for the
181
* handler function (at least as things are done now).
182
* The "block" feature basically ensures that only one of
183
* the blocked hosts is active at one time, mainly to work around
184
* buggy DMA chipsets where the memory gets starved.
185
* For this case, we have a special handler function, which
186
* does some checks and ultimately calls scsi_request_fn.
188
* The single_lun feature is a similar special case.
190
* We handle these things by stacking the handlers. The
191
* special case handlers simply check a few conditions,
192
* and return if they are not supposed to do anything.
193
* In the event that things are OK, then they call the next
194
* handler in the list - ultimately they call scsi_request_fn
195
* to do the dirty deed.
197
/*
 * Function:	scsi_initialize_queue()
 *
 * Purpose:	Set up the block-layer request queue for a SCSI device.
 *
 * Arguments:	SDpnt - device whose queue is being initialized.
 *		SHpnt - host the device is attached to (currently unused here,
 *			kept for interface compatibility with callers).
 *
 * Notes:	scsi_request_fn is installed as the strategy routine; the
 *		queue is marked "head inactive" so the mid-level may remove
 *		the head request while it is being processed.  queuedata
 *		gives scsi_request_fn a way back to the Scsi_Device.
 */
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) {
	blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
	blk_queue_headactive(&SDpnt->request_queue, 0);
	SDpnt->request_queue.queuedata = (void *) SDpnt;
}
204
MODULE_PARM(scsi_logging_level, "i");
205
MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
208
/*
 * Boot-time handler for the "scsi_logging=" kernel parameter.
 *
 * A nonzero argument enables all logging bits (~0), zero disables logging.
 * Returns 1 when the option was consumed, 0 (with a usage message) when the
 * argument could not be parsed.
 */
static int __init scsi_logging_setup(char *str)
{
	int tmp;

	if (get_option(&str, &tmp) == 1) {
		scsi_logging_level = (tmp ? ~0 : 0);
		return 1;
	} else {
		printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
		       "(n should be 0 or non-zero)\n");
		return 0;
	}
}
226
__setup("scsi_logging=", scsi_logging_setup);
231
* Issue a command and wait for it to complete
234
/*
 * Completion callback used by scsi_wait_req().
 *
 * Marks the request done and signals the waiter.  In the stock Linux driver
 * this would call complete() on req->waiting; in Xen <xeno/completion.h> is
 * unavailable (see the commented-out include above), so req->waiting is
 * instead treated as a pointer to an int flag that the caller spins on.
 *
 * NOTE(review): reconstructed from a damaged source - confirm the exact
 * #if 0 / #else structure against the original file.
 */
static void scsi_wait_done(Scsi_Cmnd * SCpnt)
{
	struct request *req;

	req = &SCpnt->request;
	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */

#if 0
	if (req->waiting != NULL) {
		complete(req->waiting);
	}
#else
	/* XXX SMH: just use a flag to signal completion; caller spins */
	if (*(int *)(req->waiting) != 0) {
		/* printk("scsi_wait_done: flipping wait status on req %p\n", req); */
		*(int *)(req->waiting) = 0;
	}
#endif
}
256
* This lock protects the freelist for all devices on the system.
257
* We could make this finer grained by having a single lock per
258
* device if it is ever found that there is excessive contention
261
static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
264
* Used to protect insertion into and removal from the queue of
265
* commands to be processed by the bottom half handler.
267
static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
270
* Function: scsi_allocate_request
272
* Purpose: Allocate a request descriptor.
274
* Arguments: device - device for which we want a request
276
* Lock status: No locks assumed to be held. This function is SMP-safe.
278
* Returns: Pointer to request block.
280
* Notes: With the new queueing code, it becomes important
281
* to track the difference between a command and a
282
* request. A request is a pending item in the queue that
283
* has not yet reached the top of the queue.
286
/*
 * Function:	scsi_allocate_request
 *
 * Purpose:	Allocate a request descriptor.
 *
 * Arguments:	device - device for which we want a request.  Must be
 *		non-NULL; a NULL device is a caller bug and panics.
 *
 * Lock status:	No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:	Pointer to a zeroed request block, or NULL if the atomic
 *		allocation fails.  Caller owns the block and must release
 *		it with scsi_release_request().
 */
Scsi_Request *scsi_allocate_request(Scsi_Device * device)
{
	Scsi_Request *SRpnt = NULL;

	if (!device)
		panic("No device passed to scsi_allocate_request().\n");

	/* GFP_ATOMIC: this may be called from contexts that cannot sleep. */
	SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
	if (SRpnt == NULL)
		return NULL;

	memset(SRpnt, 0, sizeof(Scsi_Request));
	SRpnt->sr_device = device;
	SRpnt->sr_host = device->host;
	SRpnt->sr_magic = SCSI_REQ_MAGIC;
	SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;

	return SRpnt;
}
309
* Function: scsi_release_request
311
* Purpose: Release a request descriptor.
313
* Arguments: device - device for which we want a request
315
* Lock status: No locks assumed to be held. This function is SMP-safe.
317
* Returns: Pointer to request block.
319
* Notes: With the new queueing code, it becomes important
320
* to track the difference between a command and a
321
* request. A request is a pending item in the queue that
322
* has not yet reached the top of the queue. We still need
323
* to free a request when we are done with it, of course.
325
/*
 * Function:	scsi_release_request
 *
 * Purpose:	Release a request descriptor obtained from
 *		scsi_allocate_request(), freeing any command block still
 *		attached to it and then the request itself.
 *
 * Arguments:	req - request to release; must not be used afterwards.
 *
 * Lock status:	No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(Scsi_Request * req)
{
	if (req->sr_command != NULL) {
		scsi_release_command(req->sr_command);
		req->sr_command = NULL;
	}

	/* NOTE(review): final kfree(req) reconstructed from damaged source -
	 * confirm against the original file. */
	kfree(req);
}
337
* Function: scsi_allocate_device
339
* Purpose: Allocate a command descriptor.
341
* Arguments: device - device for which we want a command descriptor
342
* wait - 1 if we should wait in the event that none
344
* interruptible - 1 if we should unblock and return NULL
345
* in the event that we must wait, and a signal
348
* Lock status: No locks assumed to be held. This function is SMP-safe.
350
* Returns: Pointer to command descriptor.
352
* Notes: Prior to the new queue code, this function was not SMP-safe.
354
* If the wait flag is true, and we are waiting for a free
355
* command block, this function will interrupt and return
356
* NULL in the event that a signal arrives that needs to
359
* This function is deprecated, and drivers should be
360
* rewritten to use Scsi_Request instead of Scsi_Cmnd.
363
Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
366
struct Scsi_Host *host;
367
Scsi_Cmnd *SCpnt = NULL;
372
panic("No device passed to scsi_allocate_device().\n");
376
spin_lock_irqsave(&device_request_lock, flags);
380
if (!device->device_blocked) {
381
if (device->single_lun) {
383
* FIXME(eric) - this is not at all optimal. Given that
384
* single lun devices are rare and usually slow
385
* (i.e. CD changers), this is good enough for now, but
386
* we may want to come back and optimize this later.
388
* Scan through all of the devices attached to this
389
* host, and see if any are active or not. If so,
390
* we need to defer this command.
392
* We really need a busy counter per device. This would
393
* allow us to more easily figure out whether we should
394
* do anything here or not.
396
for (SDpnt = host->host_queue;
398
SDpnt = SDpnt->next) {
400
* Only look for other devices on the same bus
401
* with the same target ID.
403
if (SDpnt->channel != device->channel
404
|| SDpnt->id != device->id
405
|| SDpnt == device) {
408
if( atomic_read(&SDpnt->device_active) != 0)
415
* Some other device in this cluster is busy.
416
* If asked to wait, we need to wait, otherwise
424
* Now we can check for a free command block for this device.
426
for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
427
if (SCpnt->request.rq_status == RQ_INACTIVE)
432
* If we couldn't find a free command block, and we have been
433
* asked to wait, then do so.
440
* If we have been asked to wait for a free block, then
444
printk("XXX smh: scsi cannot wait for free cmd block.\n");
447
DECLARE_WAITQUEUE(wait, current);
450
* We need to wait for a free commandblock. We need to
451
* insert ourselves into the list before we release the
452
* lock. This way if a block were released the same
453
* microsecond that we released the lock, the call
454
* to schedule() wouldn't block (well, it might switch,
455
* but the current task will still be schedulable.
457
add_wait_queue(&device->scpnt_wait, &wait);
458
if( interruptable ) {
459
set_current_state(TASK_INTERRUPTIBLE);
461
set_current_state(TASK_UNINTERRUPTIBLE);
464
spin_unlock_irqrestore(&device_request_lock, flags);
467
* This should block until a device command block
472
spin_lock_irqsave(&device_request_lock, flags);
474
remove_wait_queue(&device->scpnt_wait, &wait);
476
* FIXME - Isn't this redundant?? Someone
477
* else will have forced the state back to running.
479
set_current_state(TASK_RUNNING);
481
* In the event that a signal has arrived that we need
482
* to consider, then simply return NULL. Everyone
483
* that calls us should be prepared for this
484
* possibility, and pass the appropriate code back
487
if( interruptable ) {
488
if (signal_pending(current)) {
489
spin_unlock_irqrestore(&device_request_lock, flags);
495
spin_unlock_irqrestore(&device_request_lock, flags);
500
SCpnt->request.rq_status = RQ_SCSI_BUSY;
501
SCpnt->request.waiting = NULL; /* And no one is waiting for this
503
atomic_inc(&SCpnt->host->host_active);
504
atomic_inc(&SCpnt->device->device_active);
506
SCpnt->buffer = NULL;
508
SCpnt->request_buffer = NULL;
509
SCpnt->request_bufflen = 0;
511
SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
512
SCpnt->old_use_sg = 0;
513
SCpnt->transfersize = 0; /* No default transfer size */
516
SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
517
SCpnt->sc_request = NULL;
518
SCpnt->sc_magic = SCSI_CMND_MAGIC;
521
SCpnt->underflow = 0; /* Do not flag underflow conditions */
522
SCpnt->old_underflow = 0;
524
SCpnt->state = SCSI_STATE_INITIALIZING;
525
SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
527
spin_unlock_irqrestore(&device_request_lock, flags);
529
SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
531
atomic_read(&SCpnt->host->host_active)));
536
inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
541
spin_lock_irqsave(&device_request_lock, flags);
543
SDpnt = SCpnt->device;
545
SCpnt->request.rq_status = RQ_INACTIVE;
546
SCpnt->state = SCSI_STATE_UNUSED;
547
SCpnt->owner = SCSI_OWNER_NOBODY;
548
atomic_dec(&SCpnt->host->host_active);
549
atomic_dec(&SDpnt->device_active);
551
SCSI_LOG_MLQUEUE(5, printk(
552
"Deactivating command for device %d (active=%d, failed=%d)\n",
554
atomic_read(&SCpnt->host->host_active),
555
SCpnt->host->host_failed));
556
if (SCpnt->host->host_failed != 0) {
557
SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
558
SCpnt->host->in_recovery,
559
SCpnt->host->eh_active));
562
* If the host is having troubles, then look to see if this was the last
563
* command that might have failed. If so, wake up the error handler.
565
if (SCpnt->host->in_recovery
566
&& !SCpnt->host->eh_active
567
&& SCpnt->host->host_busy == SCpnt->host->host_failed) {
569
SCSI_LOG_ERROR_RECOVERY(5, printk(
570
"Waking error handler thread (%d)\n",
571
atomic_read(&SCpnt->host->eh_wait->count)));
572
up(SCpnt->host->eh_wait);
576
spin_unlock_irqrestore(&device_request_lock, flags);
580
* Wake up anyone waiting for this device. Do this after we
581
* have released the lock, as they will need it as soon as
584
wake_up(&SDpnt->scpnt_wait);
590
* Function: scsi_release_command
592
* Purpose: Release a command block.
594
* Arguments: SCpnt - command block we are releasing.
596
* Notes: The command block can no longer be used by the caller once
597
* this funciton is called. This is in effect the inverse
598
* of scsi_allocate_device. Note that we also must perform
599
* a couple of additional tasks. We must first wake up any
600
* processes that might have blocked waiting for a command
601
* block, and secondly we must hit the queue handler function
602
* to make sure that the device is busy. Note - there is an
603
* option to not do this - there were instances where we could
604
* recurse too deeply and blow the stack if this happened
605
* when we were indirectly called from the request function
608
* The idea is that a lot of the mid-level internals gunk
609
* gets hidden in this function. Upper level drivers don't
610
* have any chickens to wave in the air to get things to
613
* This function is deprecated, and drivers should be
614
* rewritten to use Scsi_Request instead of Scsi_Cmnd.
616
/*
 * Function:	scsi_release_command
 *
 * Purpose:	Release a command block (inverse of scsi_allocate_device).
 *
 * Arguments:	SCpnt - command block we are releasing.  Must not be used
 *		by the caller after this returns.
 *
 * Notes:	Delegates the bookkeeping (state reset, active-count
 *		decrements, waiter wakeup) to __scsi_release_command(), then
 *		kicks the device's request queue so it stays busy if more
 *		requests are pending.  SDpnt is cached before the release
 *		because SCpnt may be reused as soon as it is freed.
 */
void scsi_release_command(Scsi_Cmnd * SCpnt)
{
	request_queue_t *q;
	Scsi_Device * SDpnt;

	SDpnt = SCpnt->device;

	__scsi_release_command(SCpnt);

	/*
	 * Finally, hit the queue request function to make sure that
	 * the device is actually busy if there are requests present.
	 * This won't block - if the device cannot take any more, life
	 * will go on.
	 */
	q = &SDpnt->request_queue;
	scsi_queue_next_request(q, NULL);
}
636
* Function: scsi_dispatch_command
638
* Purpose: Dispatch a command to the low-level driver.
640
* Arguments: SCpnt - command block we are dispatching.
644
int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
649
struct Scsi_Host *host;
651
unsigned long flags = 0;
652
unsigned long timeout;
654
ASSERT_LOCK(&io_request_lock, 0);
657
unsigned long *ret = 0;
659
__asm__ __volatile__("move\t%0,$31":"=r"(ret));
661
ret = __builtin_return_address(0);
667
/* Assign a unique nonzero serial_number. */
668
if (++serial_number == 0)
670
SCpnt->serial_number = serial_number;
671
SCpnt->pid = scsi_pid++;
674
* We will wait MIN_RESET_DELAY clock ticks after the last reset so
675
* we can avoid the drive not being ready.
677
timeout = host->last_reset + MIN_RESET_DELAY;
679
if (host->resetting && time_before(jiffies, timeout)) {
680
int ticks_remaining = timeout - jiffies;
682
* NOTE: This may be executed from within an interrupt
683
* handler! This is bad, but for now, it'll do. The irq
684
* level of the interrupt handler has been masked out by the
685
* platform dependent interrupt handling code already, so the
686
* sti() here will not cause another call to the SCSI host's
687
* interrupt handler (assuming there is one irq-level per
690
while (--ticks_remaining >= 0)
691
mdelay(1 + 999 / HZ);
694
if (host->hostt->use_new_eh_code) {
695
scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
697
scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
702
* We will use a queued command if possible, otherwise we will emulate the
703
* queuing and calling of completion function ourselves.
705
SCSI_LOG_MLQUEUE(3, printk(
706
"scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
707
"command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
708
SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
709
SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
711
SCpnt->state = SCSI_STATE_QUEUED;
712
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
713
if (host->can_queue) {
714
SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
715
host->hostt->queuecommand));
717
* Use the old error handling code if we haven't converted the driver
718
* to use the new one yet. Note - only the new queuecommand variant
719
* passes a meaningful return value.
721
if (host->hostt->use_new_eh_code) {
723
* Before we queue this command, check if the command
724
* length exceeds what the host adapter can handle.
726
if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
727
spin_lock_irqsave(&io_request_lock, flags);
728
rtn = host->hostt->queuecommand(SCpnt, scsi_done);
729
spin_unlock_irqrestore(&io_request_lock, flags);
731
scsi_delete_timer(SCpnt);
732
scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
733
SCSI_LOG_MLQUEUE(3, printk(
734
"queuecommand : request rejected\n"));
737
SCSI_LOG_MLQUEUE(3, printk(
738
"queuecommand : command too long.\n"));
739
SCpnt->result = (DID_ABORT << 16);
740
spin_lock_irqsave(&io_request_lock, flags);
742
spin_unlock_irqrestore(&io_request_lock, flags);
747
* Before we queue this command, check if the command
748
* length exceeds what the host adapter can handle.
750
if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
751
spin_lock_irqsave(&io_request_lock, flags);
752
host->hostt->queuecommand(SCpnt, scsi_old_done);
753
spin_unlock_irqrestore(&io_request_lock, flags);
755
SCSI_LOG_MLQUEUE(3, printk(
756
"queuecommand : command too long.\n"));
757
SCpnt->result = (DID_ABORT << 16);
758
spin_lock_irqsave(&io_request_lock, flags);
759
scsi_old_done(SCpnt);
760
spin_unlock_irqrestore(&io_request_lock, flags);
768
SCSI_LOG_MLQUEUE(3, printk(
769
"command() : routine at %p\n", host->hostt->command));
770
spin_lock_irqsave(&io_request_lock, flags);
771
temp = host->hostt->command(SCpnt);
772
SCpnt->result = temp;
774
spin_unlock_irqrestore(&io_request_lock, flags);
775
clock = jiffies + 4 * HZ;
776
while (time_before(jiffies, clock)) {
780
printk("done(host = %d, result = %04x) : routine at %p\n",
781
host->host_no, temp, host->hostt->command);
782
spin_lock_irqsave(&io_request_lock, flags);
784
if (host->hostt->use_new_eh_code) {
787
scsi_old_done(SCpnt);
789
spin_unlock_irqrestore(&io_request_lock, flags);
791
SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
795
#ifdef DEVFS_MUST_DIE
796
devfs_handle_t scsi_devfs_handle;
800
* scsi_do_cmd sends all the commands out to the low-level driver. It
801
* handles the specifics required for each low level driver - ie queued
802
* or non queued. It also prevents conflicts when different high level
803
* drivers go for the same host at the same time.
806
void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
807
void *buffer, unsigned bufflen,
808
int timeout, int retries)
811
DECLARE_COMPLETION(wait);
818
request_queue_t *q = &SRpnt->sr_device->request_queue;
821
SRpnt->sr_request.waiting = &wait;
823
SRpnt->sr_request.waiting = (void *)&wait;
827
SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
828
scsi_do_req (SRpnt, (void *) cmnd,
829
buffer, bufflen, scsi_wait_done, timeout, retries);
830
generic_unplug_device(q);
834
wait_for_completion(&wait);
835
SRpnt->sr_request.waiting = NULL;
838
/* XXX SMH: in 'standard' driver we think everythings ok here since
839
we've waited on &wait -- hence we deallocate the command structure
840
if it hasn't been done already. This is not the correct behaviour
841
in xen ... hmm .. how to fix? */
843
do_softirq(); /* XXX KAF: this is safe, and necessary!! */
846
if(usecs > 1000000) {
847
printk("scsi_wait_req: still waiting...!\n");
854
if( SRpnt->sr_command != NULL )
856
scsi_release_command(SRpnt->sr_command);
857
SRpnt->sr_command = NULL;
863
* Function: scsi_do_req
865
* Purpose: Queue a SCSI request
867
* Arguments: SRpnt - command descriptor.
868
* cmnd - actual SCSI command to be performed.
869
* buffer - data buffer.
870
* bufflen - size of data buffer.
871
* done - completion function to be run.
872
* timeout - how long to let it run before timeout.
873
* retries - number of retries we allow.
875
* Lock status: With the new queueing code, this is SMP-safe, and no locks
876
* need be held upon entry. The old queueing code the lock was
877
* assumed to be held upon entry.
881
* Notes: Prior to the new queue code, this function was not SMP-safe.
882
* Also, this function is now only used for queueing requests
883
* for things like ioctls and character device requests - this
884
* is because we essentially just inject a request into the
885
* queue for the device. Normal block device handling manipulates
886
* the queue directly.
888
void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
889
void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
890
int timeout, int retries)
892
Scsi_Device * SDpnt = SRpnt->sr_device;
893
struct Scsi_Host *host = SDpnt->host;
895
ASSERT_LOCK(&io_request_lock, 0);
900
int target = SDpnt->id;
901
int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
902
printk("scsi_do_req (host = %d, channel = %d target = %d, "
903
"buffer =%p, bufflen = %d, done = %p, timeout = %d, "
905
"command : ", host->host_no, SDpnt->channel, target, buffer,
906
bufflen, done, timeout, retries);
907
for (i = 0; i < size; ++i)
908
printk("%02x ", ((unsigned char *) cmnd)[i]);
913
panic("Invalid or not present host.\n");
917
* If the upper level driver is reusing these things, then
918
* we should release the low-level block now. Another one will
919
* be allocated later when this request is getting queued.
921
if( SRpnt->sr_command != NULL )
923
scsi_release_command(SRpnt->sr_command);
924
SRpnt->sr_command = NULL;
928
* We must prevent reentrancy to the lowlevel host driver.
929
* This prevents it - we enter a loop until the host we want
930
* to talk to is not busy. Race conditions are prevented, as
931
* interrupts are disabled in between the time we check for
932
* the host being not busy, and the time we mark it busy
937
* Our own function scsi_done (which marks the host as not
938
* busy, disables the timeout counter, etc) will be called by
939
* us or by the scsi_hosts[host].queuecommand() function needs
940
* to also call the completion function for the high level
943
memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
944
sizeof(SRpnt->sr_cmnd));
946
SRpnt->sr_bufflen = bufflen;
947
SRpnt->sr_buffer = buffer;
948
SRpnt->sr_allowed = retries;
949
SRpnt->sr_done = done;
950
SRpnt->sr_timeout_per_command = timeout;
952
if (SRpnt->sr_cmd_len == 0)
953
SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
956
* At this point, we merely set up the command, stick it in the normal
957
* request queue, and return. Eventually that request will come to the
958
* top of the list, and will be dispatched.
960
scsi_insert_special_req(SRpnt, 0);
962
SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
966
* Function: scsi_init_cmd_from_req
968
* Purpose: Queue a SCSI command
969
* Purpose: Initialize a Scsi_Cmnd from a Scsi_Request
971
* Arguments: SCpnt - command descriptor.
972
* SRpnt - Request from the queue.
974
* Lock status: None needed.
978
* Notes: Mainly transfer data from the request structure to the
979
* command structure. The request structure is allocated
980
* using the normal memory allocator, and requests can pile
981
* up to more or less any depth. The command structure represents
982
* a consumable resource, as these are allocated into a pool
983
* when the SCSI subsystem initializes. The preallocation is
984
* required so that in low-memory situations a disk I/O request
985
* won't cause the memory manager to try and write out a page.
986
* The request structure is generally used by ioctls and character
989
void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
991
struct Scsi_Host *host = SCpnt->host;
993
ASSERT_LOCK(&io_request_lock, 0);
995
SCpnt->owner = SCSI_OWNER_MIDLEVEL;
996
SRpnt->sr_command = SCpnt;
999
panic("Invalid or not present host.\n");
1002
SCpnt->cmd_len = SRpnt->sr_cmd_len;
1003
SCpnt->use_sg = SRpnt->sr_use_sg;
1005
memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
1006
sizeof(SRpnt->sr_request));
1007
memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
1008
sizeof(SCpnt->data_cmnd));
1009
SCpnt->reset_chain = NULL;
1010
SCpnt->serial_number = 0;
1011
SCpnt->serial_number_at_timeout = 0;
1012
SCpnt->bufflen = SRpnt->sr_bufflen;
1013
SCpnt->buffer = SRpnt->sr_buffer;
1016
SCpnt->allowed = SRpnt->sr_allowed;
1017
SCpnt->done = SRpnt->sr_done;
1018
SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
1020
SCpnt->sc_data_direction = SRpnt->sr_data_direction;
1022
SCpnt->sglist_len = SRpnt->sr_sglist_len;
1023
SCpnt->underflow = SRpnt->sr_underflow;
1025
SCpnt->sc_request = SRpnt;
1027
memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
1028
sizeof(SCpnt->cmnd));
1029
/* Zero the sense buffer. Some host adapters automatically request
1030
* sense on error. 0 is not a valid sense code.
1032
memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1033
SCpnt->request_buffer = SRpnt->sr_buffer;
1034
SCpnt->request_bufflen = SRpnt->sr_bufflen;
1035
SCpnt->old_use_sg = SCpnt->use_sg;
1036
if (SCpnt->cmd_len == 0)
1037
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1038
SCpnt->old_cmd_len = SCpnt->cmd_len;
1039
SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1040
SCpnt->old_underflow = SCpnt->underflow;
1042
/* Start the timer ticking. */
1044
SCpnt->internal_timeout = NORMAL_TIMEOUT;
1045
SCpnt->abort_reason = 0;
1048
SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
1052
* Function: scsi_do_cmd
1054
* Purpose: Queue a SCSI command
1056
* Arguments: SCpnt - command descriptor.
1057
* cmnd - actual SCSI command to be performed.
1058
* buffer - data buffer.
1059
* bufflen - size of data buffer.
1060
* done - completion function to be run.
1061
* timeout - how long to let it run before timeout.
1062
* retries - number of retries we allow.
1064
* Lock status: With the new queueing code, this is SMP-safe, and no locks
1065
* need be held upon entry. The old queueing code the lock was
1066
* assumed to be held upon entry.
1070
* Notes: Prior to the new queue code, this function was not SMP-safe.
1071
* Also, this function is now only used for queueing requests
1072
* for things like ioctls and character device requests - this
1073
* is because we essentially just inject a request into the
1074
* queue for the device. Normal block device handling manipulates
1075
* the queue directly.
1077
void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
1078
void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
1079
int timeout, int retries)
1081
struct Scsi_Host *host = SCpnt->host;
1083
ASSERT_LOCK(&io_request_lock, 0);
1085
SCpnt->pid = scsi_pid++;
1086
SCpnt->owner = SCSI_OWNER_MIDLEVEL;
1091
int target = SCpnt->target;
1092
int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
1093
printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
1094
"buffer =%p, bufflen = %d, done = %p, timeout = %d, "
1096
"command : ", host->host_no, SCpnt->channel, target, buffer,
1097
bufflen, done, timeout, retries);
1098
for (i = 0; i < size; ++i)
1099
printk("%02x ", ((unsigned char *) cmnd)[i]);
1104
panic("Invalid or not present host.\n");
1107
* We must prevent reentrancy to the lowlevel host driver. This prevents
1108
* it - we enter a loop until the host we want to talk to is not busy.
1109
* Race conditions are prevented, as interrupts are disabled in between the
1110
* time we check for the host being not busy, and the time we mark it busy
1116
* Our own function scsi_done (which marks the host as not busy, disables
1117
* the timeout counter, etc) will be called by us or by the
1118
* scsi_hosts[host].queuecommand() function needs to also call
1119
* the completion function for the high level driver.
1122
memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
1123
sizeof(SCpnt->data_cmnd));
1124
SCpnt->reset_chain = NULL;
1125
SCpnt->serial_number = 0;
1126
SCpnt->serial_number_at_timeout = 0;
1127
SCpnt->bufflen = bufflen;
1128
SCpnt->buffer = buffer;
1131
SCpnt->allowed = retries;
1133
SCpnt->timeout_per_command = timeout;
1135
memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
1136
sizeof(SCpnt->cmnd));
1137
/* Zero the sense buffer. Some host adapters automatically request
1138
* sense on error. 0 is not a valid sense code.
1140
memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1141
SCpnt->request_buffer = buffer;
1142
SCpnt->request_bufflen = bufflen;
1143
SCpnt->old_use_sg = SCpnt->use_sg;
1144
if (SCpnt->cmd_len == 0)
1145
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1146
SCpnt->old_cmd_len = SCpnt->cmd_len;
1147
SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1148
SCpnt->old_underflow = SCpnt->underflow;
1150
/* Start the timer ticking. */
1152
SCpnt->internal_timeout = NORMAL_TIMEOUT;
1153
SCpnt->abort_reason = 0;
1157
* At this point, we merely set up the command, stick it in the normal
1158
* request queue, and return. Eventually that request will come to the
1159
* top of the list, and will be dispatched.
1161
scsi_insert_special_cmd(SCpnt, 0);
1163
SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1167
* This function is the mid-level interrupt routine, which decides how
1168
* to handle error conditions. Each invocation of this function must
1169
* do one and *only* one of the following:
1171
* 1) Insert command in BH queue.
1172
* 2) Activate error handler for host.
1174
* FIXME(eric) - I am concerned about stack overflow (still). An
1175
* interrupt could come while we are processing the bottom queue,
1176
* which would cause another command to be stuffed onto the bottom
1177
* queue, and it would in turn be processed as that interrupt handler
1178
* is returning. Given a sufficiently steady rate of returning
1179
* commands, this could cause the stack to overflow. I am not sure
1180
* what is the most appropriate solution here - we should probably
1181
* keep a depth count, and not process any commands while we still
1182
* have a bottom handler active higher in the stack.
1184
* There is currently code in the bottom half handler to monitor
1185
* recursion in the bottom handler and report if it ever happens. If
1186
* this becomes a problem, it won't be hard to engineer something to
1187
* deal with it so that only the outer layer ever does any real
1190
void scsi_done(Scsi_Cmnd * SCpnt)
1192
unsigned long flags;
1196
* We don't have to worry about this one timing out any more.
1198
tstatus = scsi_delete_timer(SCpnt);
1201
* If we are unable to remove the timer, it means that the command
1202
* has already timed out. In this case, we have no choice but to
1203
* let the timeout function run, as we have no idea where in fact
1204
* that function could really be. It might be on another processor,
1208
SCpnt->done_late = 1;
1211
/* Set the serial numbers back to zero */
1212
SCpnt->serial_number = 0;
1215
* First, see whether this command already timed out. If so, we ignore
1216
* the response. We treat it as if the command never finished.
1218
* Since serial_number is now 0, the error handler cound detect this
1219
* situation and avoid to call the low level driver abort routine.
1222
* FIXME(eric) - I believe that this test is now redundant, due to
1223
* the test of the return status of del_timer().
1225
if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1226
SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1229
spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1231
SCpnt->serial_number_at_timeout = 0;
1232
SCpnt->state = SCSI_STATE_BHQUEUE;
1233
SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1234
SCpnt->bh_next = NULL;
1237
* Next, put this command in the BH queue.
1239
* We need a spinlock here, or compare and exchange if we can reorder incoming
1240
* Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1241
* before bh is serviced. -jj
1243
* We already have the io_request_lock here, since we are called from the
1244
* interrupt handler or the error handler. (DB)
1246
* This may be true at the moment, but I would like to wean all of the low
1247
* level drivers away from using io_request_lock. Technically they should
1248
* all use their own locking. I am adding a small spinlock to protect
1249
* this datastructure to make it safe for that day. (ERY)
1251
if (!scsi_bh_queue_head) {
1252
scsi_bh_queue_head = SCpnt;
1253
scsi_bh_queue_tail = SCpnt;
1255
scsi_bh_queue_tail->bh_next = SCpnt;
1256
scsi_bh_queue_tail = SCpnt;
1259
spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1261
* Mark the bottom half handler to be run.
1267
* Procedure: scsi_bottom_half_handler
1269
* Purpose: Called after we have finished processing interrupts, it
1270
* performs post-interrupt handling for commands that may
1273
* Notes: This is called with all interrupts enabled. This should reduce
1274
* interrupt latency, stack depth, and reentrancy of the low-level
1277
* The io_request_lock is required in all the routine. There was a subtle
1278
* race condition when scsi_done is called after a command has already
1279
* timed out but before the time out is processed by the error handler.
1282
* I believe I have corrected this. We simply monitor the return status of
1283
* del_timer() - if this comes back as 0, it means that the timer has fired
1284
* and that a timeout is in progress. I have modified scsi_done() such
1285
* that in this instance the command is never inserted in the bottom
1286
* half queue. Thus the only time we hold the lock here is when
1287
* we wish to atomically remove the contents of the queue.
1289
void scsi_bottom_half_handler(void)
1293
unsigned long flags;
1297
spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1298
SCpnt = scsi_bh_queue_head;
1299
scsi_bh_queue_head = NULL;
1300
spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1302
if (SCpnt == NULL) {
1305
SCnext = SCpnt->bh_next;
1307
for (; SCpnt; SCpnt = SCnext) {
1308
SCnext = SCpnt->bh_next;
1310
switch (scsi_decide_disposition(SCpnt)) {
1315
SCSI_LOG_MLCOMPLETE(3,
1316
printk("Command finished %d %d 0x%x\n",
1317
SCpnt->host->host_busy,
1318
SCpnt->host->host_failed,
1321
scsi_finish_command(SCpnt);
1325
* We only come in here if we want to retry a command.
1326
* The test to see whether the command should be
1327
* retried should be keeping track of the number of
1328
* tries, so we don't end up looping, of course. */
1329
SCSI_LOG_MLCOMPLETE(3,
1330
printk("Command needs retry %d %d 0x%x\n",
1331
SCpnt->host->host_busy,
1332
SCpnt->host->host_failed,
1335
scsi_retry_command(SCpnt);
1337
case ADD_TO_MLQUEUE:
1339
* This typically happens for a QUEUE_FULL message -
1340
* typically only when the queue depth is only
1341
* approximate for a given device. Adding a command
1342
* to the queue for the device will prevent further commands
1343
* from being sent to the device, so we shouldn't end up
1344
* with tons of things being sent down that shouldn't be.
1346
SCSI_LOG_MLCOMPLETE(3, printk(
1347
"Cmnd rejected as device queue full, put on ml queue %p\n",
1349
scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1353
* Here we have a fatal error of some sort. Turn it over to
1354
* the error handler.
1356
SCSI_LOG_MLCOMPLETE(3, printk(
1357
"Command failed %p %x active=%d busy=%d failed=%d\n",
1358
SCpnt, SCpnt->result,
1359
atomic_read(&SCpnt->host->host_active),
1360
SCpnt->host->host_busy,
1361
SCpnt->host->host_failed));
1364
* Dump the sense information too.
1366
if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1367
SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
1369
if (SCpnt->host->eh_wait != NULL) {
1370
SCpnt->host->host_failed++;
1371
SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1372
SCpnt->state = SCSI_STATE_FAILED;
1373
SCpnt->host->in_recovery = 1;
1375
* If the host is having troubles, then look to
1376
* see if this was the last command that might
1377
* have failed. If so, wake up the error handler. */
1378
if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1380
SCSI_LOG_ERROR_RECOVERY(5, printk(
1381
"Waking error handler thread (%d)\n",
1382
atomic_read(&SCpnt->host->eh_wait->count)));
1383
up(SCpnt->host->eh_wait);
1388
* We only get here if the error recovery thread has died.
1390
printk("scsi_bh: error finish\n");
1391
scsi_finish_command(SCpnt);
1394
} /* for(; SCpnt...) */
1401
/*
 * Function:    scsi_retry_command
 *
 * Purpose:     Send a command back to the low level to be retried.
 *
 * Notes:       This command is always executed in the context of the
 *              bottom half handler, or the error handler thread. Low
 *              level drivers should not become re-entrant as a result of
 *              this.
 */
1410
/*
 * Resubmit a command to the low level driver for another attempt.
 *
 * Restores every field the low level driver may have modified on the
 * previous attempt (command bytes, data buffer pointer/length,
 * scatter-gather count, command length, data direction, underflow)
 * from the saved "old_*"/original copies, clears the stale sense data,
 * and dispatches the command again.
 *
 * Returns the result of scsi_dispatch_cmd().
 *
 * NOTE(review): the surrounding extraction was garbled (interleaved
 * line-number residue, missing braces); this body restores the function
 * from the statement lines that were visible — confirm against the
 * upstream mid-level source.
 */
int scsi_retry_command(Scsi_Cmnd * SCpnt)
{
	/* Restore the command block to its pre-execution state. */
	memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
	       sizeof(SCpnt->data_cmnd));
	SCpnt->request_buffer = SCpnt->buffer;
	SCpnt->request_bufflen = SCpnt->bufflen;
	SCpnt->use_sg = SCpnt->old_use_sg;
	SCpnt->cmd_len = SCpnt->old_cmd_len;
	SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
	SCpnt->underflow = SCpnt->old_underflow;

	/*
	 * Zero the sense information from the last time we tried
	 * this command.
	 */
	memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);

	return scsi_dispatch_cmd(SCpnt);
}
1431
* Function: scsi_finish_command
1433
* Purpose: Pass command off to upper layer for finishing of I/O
1434
* request, waking processes that are waiting on results,
1437
void scsi_finish_command(Scsi_Cmnd * SCpnt)
1439
struct Scsi_Host *host;
1440
Scsi_Device *device;
1441
Scsi_Request * SRpnt;
1442
unsigned long flags;
1444
ASSERT_LOCK(&io_request_lock, 0);
1447
device = SCpnt->device;
1450
* We need to protect the decrement, as otherwise a race condition
1451
* would exist. Fiddling with SCpnt isn't a problem as the
1452
* design only allows a single SCpnt to be active in only
1453
* one execution context, but the device and host structures are
1456
spin_lock_irqsave(&io_request_lock, flags);
1457
host->host_busy--; /* Indicate that we are free */
1458
device->device_busy--; /* Decrement device usage counter. */
1459
spin_unlock_irqrestore(&io_request_lock, flags);
1462
* Clear the flags which say that the device/host is no longer
1463
* capable of accepting new commands. These are set in scsi_queue.c
1464
* for both the queue full condition on a device, and for a
1465
* host full condition on the host.
1467
host->host_blocked = FALSE;
1468
device->device_blocked = FALSE;
1471
* If we have valid sense information, then some kind of recovery
1472
* must have taken place. Make a note of this.
1474
if (scsi_sense_valid(SCpnt)) {
1475
SCpnt->result |= (DRIVER_SENSE << 24);
1477
SCSI_LOG_MLCOMPLETE(3, printk(
1478
"Notifying upper driver of completion for device %d %x\n",
1479
SCpnt->device->id, SCpnt->result));
1481
SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1482
SCpnt->state = SCSI_STATE_FINISHED;
1484
/* We can get here with use_sg=0, causing a panic in the
1486
SCpnt->use_sg = SCpnt->old_use_sg;
1489
* If there is an associated request structure, copy the data over
1490
* before we call the * completion function.
1492
SRpnt = SCpnt->sc_request;
1494
if( SRpnt != NULL ) {
1495
if(!SRpnt->sr_command) {
1496
printk("scsi_finish_command: SRpnt=%p, SRpnt->sr_command=%p\n",
1497
SRpnt, SRpnt->sr_command);
1498
printk("SRpnt->freeaddr = %p\n", SRpnt->freeaddr);
1501
SRpnt->sr_result = SRpnt->sr_command->result;
1502
if( SRpnt->sr_result != 0 ) {
1503
memcpy(SRpnt->sr_sense_buffer,
1504
SRpnt->sr_command->sense_buffer,
1505
sizeof(SRpnt->sr_sense_buffer));
1512
static int scsi_register_host(Scsi_Host_Template *);
1513
static int scsi_unregister_host(Scsi_Host_Template *);
1516
/*
 * Function:    scsi_release_commandblocks()
 *
 * Purpose:     Release command blocks associated with a device.
 *
 * Arguments:   SDpnt   - device
 *
 * Returns:     Nothing
 *
 * Lock status: No locking assumed or required.
 */
1528
/*
 * Free every Scsi_Cmnd block on a device's command free-list.
 *
 * Walks SDpnt->device_queue under device_request_lock, kfree()ing each
 * command block, then marks the device as having no command blocks and
 * a zero queue depth.  The head pointer is advanced before each kfree()
 * so the list is never left pointing at freed memory.
 *
 * NOTE(review): the surrounding extraction was garbled (interleaved
 * line-number residue, missing braces); this body restores the function
 * from the statement lines that were visible — confirm against the
 * upstream mid-level source.
 */
void scsi_release_commandblocks(Scsi_Device * SDpnt)
{
	Scsi_Cmnd *SCpnt, *SCnext;
	unsigned long flags;

	spin_lock_irqsave(&device_request_lock, flags);
	for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
		SDpnt->device_queue = SCnext = SCpnt->next;
		kfree((char *) SCpnt);
	}
	SDpnt->has_cmdblocks = 0;
	SDpnt->queue_depth = 0;
	spin_unlock_irqrestore(&device_request_lock, flags);
}
1544
* Function: scsi_build_commandblocks()
1546
* Purpose: Allocate command blocks associated with a device.
1548
* Arguments: SDpnt - device
1552
* Lock status: No locking assumed or required.
1556
void scsi_build_commandblocks(Scsi_Device * SDpnt)
1558
unsigned long flags;
1559
struct Scsi_Host *host = SDpnt->host;
1563
spin_lock_irqsave(&device_request_lock, flags);
1565
if (SDpnt->queue_depth == 0)
1567
SDpnt->queue_depth = host->cmd_per_lun;
1568
if (SDpnt->queue_depth == 0)
1569
SDpnt->queue_depth = 1; /* live to fight another day */
1571
SDpnt->device_queue = NULL;
1573
for (j = 0; j < SDpnt->queue_depth; j++) {
1574
SCpnt = (Scsi_Cmnd *)
1575
kmalloc(sizeof(Scsi_Cmnd),
1577
(host->unchecked_isa_dma ? GFP_DMA : 0));
1579
break; /* If not, the next line will oops ... */
1580
memset(SCpnt, 0, sizeof(Scsi_Cmnd));
1582
SCpnt->device = SDpnt;
1583
SCpnt->target = SDpnt->id;
1584
SCpnt->lun = SDpnt->lun;
1585
SCpnt->channel = SDpnt->channel;
1586
SCpnt->request.rq_status = RQ_INACTIVE;
1588
SCpnt->old_use_sg = 0;
1589
SCpnt->old_cmd_len = 0;
1590
SCpnt->underflow = 0;
1591
SCpnt->old_underflow = 0;
1592
SCpnt->transfersize = 0;
1594
SCpnt->serial_number = 0;
1595
SCpnt->serial_number_at_timeout = 0;
1596
SCpnt->host_scribble = NULL;
1597
SCpnt->next = SDpnt->device_queue;
1598
SDpnt->device_queue = SCpnt;
1599
SCpnt->state = SCSI_STATE_UNUSED;
1600
SCpnt->owner = SCSI_OWNER_NOBODY;
1602
if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
1603
printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1604
SDpnt->queue_depth, j);
1605
SDpnt->queue_depth = j;
1606
SDpnt->has_cmdblocks = (0 != j);
1608
SDpnt->has_cmdblocks = 1;
1610
spin_unlock_irqrestore(&device_request_lock, flags);
1613
void __init scsi_host_no_insert(char *str, int n)
1615
Scsi_Host_Name *shn, *shn2;
1619
if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1620
if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
1621
strncpy(shn->name, str, len);
1624
shn->host_registered = 0;
1625
shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
1627
if (scsi_host_no_list) {
1628
for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1633
scsi_host_no_list = shn;
1634
max_scsi_hosts = n+1;
1637
kfree((char *) shn);
1641
#ifdef CONFIG_PROC_FS
1642
static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
1645
struct Scsi_Host *HBA_ptr;
1651
* First, see if there are any attached devices or not.
1653
for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1654
if (HBA_ptr->host_queue != NULL) {
1658
size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
1661
for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1663
size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
1664
HBA_ptr->hostt->procname);
1668
for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1669
proc_print_scsidevice(scd, buffer, &size, len);
1677
if (pos > offset + length)
1683
*start = buffer + (offset - begin); /* Start of wanted data */
1684
len -= (offset - begin); /* Start slop */
1686
len = length; /* Ending slop */
1690
static int proc_scsi_gen_write(struct file * file, const char * buf,
1691
unsigned long length, void *data)
1693
struct Scsi_Device_Template *SDTpnt;
1695
struct Scsi_Host *HBA_ptr;
1697
int host, channel, id, lun;
1701
if (!buf || length>PAGE_SIZE)
1704
if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1706
if(copy_from_user(buffer, buf, length))
1714
if (length < PAGE_SIZE)
1715
buffer[length] = '\0';
1716
else if (buffer[PAGE_SIZE-1])
1719
if (length < 11 || strncmp("scsi", buffer, 4))
1723
* Usage: echo "scsi dump #N" > /proc/scsi/scsi
1724
* to dump status of all scsi commands. The number is used to specify the level
1725
* of detail in the dump.
1727
if (!strncmp("dump", buffer + 5, 4)) {
1735
level = simple_strtoul(p, NULL, 0);
1736
scsi_dump_status(level);
1739
* Usage: echo "scsi log token #N" > /proc/scsi/scsi
1740
* where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1741
* llcomplete,hlqueue,hlcomplete]
1743
#ifdef CONFIG_SCSI_LOGGING /* { */
1745
if (!strncmp("log", buffer + 5, 3)) {
1751
while (*p != ' ' && *p != '\t' && *p != '\0') {
1756
if (strncmp(token, "all", 3) == 0) {
1758
* Turn on absolutely everything.
1760
scsi_logging_level = ~0;
1761
} else if (strncmp(token, "none", 4) == 0) {
1763
* Turn off absolutely everything.
1765
scsi_logging_level = 0;
1772
level = simple_strtoul(p, NULL, 0);
1775
* Now figure out what to do with it.
1777
if (strcmp(token, "error") == 0) {
1778
SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1779
} else if (strcmp(token, "timeout") == 0) {
1780
SCSI_SET_TIMEOUT_LOGGING(level);
1781
} else if (strcmp(token, "scan") == 0) {
1782
SCSI_SET_SCAN_BUS_LOGGING(level);
1783
} else if (strcmp(token, "mlqueue") == 0) {
1784
SCSI_SET_MLQUEUE_LOGGING(level);
1785
} else if (strcmp(token, "mlcomplete") == 0) {
1786
SCSI_SET_MLCOMPLETE_LOGGING(level);
1787
} else if (strcmp(token, "llqueue") == 0) {
1788
SCSI_SET_LLQUEUE_LOGGING(level);
1789
} else if (strcmp(token, "llcomplete") == 0) {
1790
SCSI_SET_LLCOMPLETE_LOGGING(level);
1791
} else if (strcmp(token, "hlqueue") == 0) {
1792
SCSI_SET_HLQUEUE_LOGGING(level);
1793
} else if (strcmp(token, "hlcomplete") == 0) {
1794
SCSI_SET_HLCOMPLETE_LOGGING(level);
1795
} else if (strcmp(token, "ioctl") == 0) {
1796
SCSI_SET_IOCTL_LOGGING(level);
1802
printk(KERN_INFO "scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1804
#endif /* CONFIG_SCSI_LOGGING */ /* } */
1807
* Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1808
* with "0 1 2 3" replaced by your "Host Channel Id Lun".
1809
* Consider this feature BETA.
1810
* CAUTION: This is not for hotplugging your peripherals. As
1811
* SCSI was not designed for this you could damage your
1813
* However perhaps it is legal to switch on an
1814
* already connected device. It is perhaps not
1815
* guaranteed this device doesn't corrupt an ongoing data transfer.
1817
if (!strncmp("add-single-device", buffer + 5, 17)) {
1820
host = simple_strtoul(p, &p, 0);
1821
channel = simple_strtoul(p + 1, &p, 0);
1822
id = simple_strtoul(p + 1, &p, 0);
1823
lun = simple_strtoul(p + 1, &p, 0);
1825
printk(KERN_INFO "scsi singledevice %d %d %d %d\n", host, channel,
1828
for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1829
if (HBA_ptr->host_no == host) {
1837
for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1838
if ((scd->channel == channel
1840
&& scd->lun == lun)) {
1847
goto out; /* We do not yet support unplugging */
1849
scan_scsis(HBA_ptr, 1, channel, id, lun);
1851
/* FIXME (DB) This assumes that the queue_depth routines can be used
1852
in this context as well, while they were all designed to be
1853
called only once after the detect routine. (DB) */
1854
/* queue_depth routine moved to inside scan_scsis(,1,,,) so
1855
it is called before build_commandblocks() */
1861
* Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1862
* with "0 1 2 3" replaced by your "Host Channel Id Lun".
1864
* Consider this feature pre-BETA.
1866
* CAUTION: This is not for hotplugging your peripherals. As
1867
* SCSI was not designed for this you could damage your
1868
* hardware and thoroughly confuse the SCSI subsystem.
1871
else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1874
host = simple_strtoul(p, &p, 0);
1875
channel = simple_strtoul(p + 1, &p, 0);
1876
id = simple_strtoul(p + 1, &p, 0);
1877
lun = simple_strtoul(p + 1, &p, 0);
1880
for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1881
if (HBA_ptr->host_no == host) {
1889
for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1890
if ((scd->channel == channel
1892
&& scd->lun == lun)) {
1898
goto out; /* there is no such device attached */
1901
if (scd->access_count)
1904
SDTpnt = scsi_devicelist;
1905
while (SDTpnt != NULL) {
1907
(*SDTpnt->detach) (scd);
1908
SDTpnt = SDTpnt->next;
1911
if (scd->attached == 0) {
1913
* Nobody is using this device any more.
1914
* Free all of the command structures.
1916
if (HBA_ptr->hostt->revoke)
1917
HBA_ptr->hostt->revoke(scd);
1918
#ifdef DEVFS_MUST_DIE
1919
devfs_unregister (scd->de);
1921
scsi_release_commandblocks(scd);
1923
/* Now we can remove the device structure */
1924
if (scd->next != NULL)
1925
scd->next->prev = scd->prev;
1927
if (scd->prev != NULL)
1928
scd->prev->next = scd->next;
1930
if (HBA_ptr->host_queue == scd) {
1931
HBA_ptr->host_queue = scd->next;
1933
blk_cleanup_queue(&scd->request_queue);
1934
kfree((char *) scd);
1942
free_page((unsigned long) buffer);
1948
* This entry point should be called by a driver if it is trying
1949
* to add a low level scsi driver to the system.
1951
static int scsi_register_host(Scsi_Host_Template * tpnt)
1954
struct Scsi_Host *shpnt;
1956
struct Scsi_Device_Template *sdtpnt;
1958
unsigned long flags;
1959
int out_of_space = 0;
1961
if (tpnt->next || !tpnt->detect)
1962
return 1; /* Must be already loaded, or
1963
* no detect routine available
1966
/* If max_sectors isn't set, default to max */
1967
if (!tpnt->max_sectors)
1968
tpnt->max_sectors = MAX_SECTORS;
1970
pcount = next_scsi_host;
1974
/* The detect routine must carefully spinunlock/spinlock if
1975
it enables interrupts, since all interrupt handlers do
1977
All lame drivers are going to fail due to the following
1978
spinlock. For the time beeing let's use it only for drivers
1979
using the new scsi code. NOTE: the detect routine could
1980
redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1982
if (tpnt->use_new_eh_code) {
1983
spin_lock_irqsave(&io_request_lock, flags);
1984
tpnt->present = tpnt->detect(tpnt);
1985
spin_unlock_irqrestore(&io_request_lock, flags);
1987
tpnt->present = tpnt->detect(tpnt);
1989
if (tpnt->present) {
1990
if (pcount == next_scsi_host) {
1991
if (tpnt->present > 1) {
1992
printk(KERN_ERR "scsi: Failure to register low-level "
1994
scsi_unregister_host(tpnt);
1998
* The low-level driver failed to register a driver.
1999
* We can do this now.
2001
if(scsi_register(tpnt, 0)==NULL)
2003
printk(KERN_ERR "scsi: register failed.\n");
2004
scsi_unregister_host(tpnt);
2008
tpnt->next = scsi_hosts; /* Add to the linked list */
2011
/* Add the new driver to /proc/scsi */
2012
#ifdef CONFIG_PROC_FS
2013
build_proc_dir_entries(tpnt);
2019
* Add the kernel threads for each host adapter that will
2020
* handle error correction.
2022
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2023
if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
2024
DECLARE_MUTEX_LOCKED(sem);
2026
shpnt->eh_notify = &sem;
2027
kernel_thread((int (*)(void *)) scsi_error_handler,
2031
* Now wait for the kernel error thread to initialize itself
2032
* as it might be needed when we scan the bus.
2035
shpnt->eh_notify = NULL;
2040
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2041
if (shpnt->hostt == tpnt) {
2043
name = tpnt->info(shpnt);
2047
printk(KERN_INFO "scsi%d : %s\n", /* And print a little message */
2048
shpnt->host_no, name);
2052
/* The next step is to call scan_scsis here. This generates the
2053
* Scsi_Devices entries
2055
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2056
if (shpnt->hostt == tpnt) {
2057
scan_scsis(shpnt, 0, 0, 0, 0);
2058
if (shpnt->select_queue_depths != NULL) {
2059
(shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
2064
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2065
if (sdtpnt->init && sdtpnt->dev_noticed)
2070
* Next we create the Scsi_Cmnd structures for this host
2072
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2073
for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
2074
if (SDpnt->host->hostt == tpnt) {
2075
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2077
(*sdtpnt->attach) (SDpnt);
2078
if (SDpnt->attached) {
2079
scsi_build_commandblocks(SDpnt);
2080
if (0 == SDpnt->has_cmdblocks)
2087
* Now that we have all of the devices, resize the DMA pool,
2090
scsi_resize_dma_pool();
2093
/* This does any final handling that is required. */
2094
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2095
if (sdtpnt->finish && sdtpnt->nr_dev) {
2096
(*sdtpnt->finish) ();
2100
#if defined(USE_STATIC_SCSI_MEMORY)
2101
printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2102
(scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2103
(scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2104
(scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2108
scsi_unregister_host(tpnt); /* easiest way to clean up?? */
2116
* Similarly, this entry point should be called by a loadable module if it
2117
* is trying to remove a low level scsi driver from the system.
2119
static int scsi_unregister_host(Scsi_Host_Template * tpnt)
2122
int pcount0, pcount;
2125
Scsi_Device *SDpnt1;
2126
struct Scsi_Device_Template *sdtpnt;
2127
struct Scsi_Host *sh1;
2128
struct Scsi_Host *shpnt;
2129
char name[10]; /* host_no>=10^9? I don't think so. */
2132
/* get the big kernel lock, so we don't race with open() */
2137
* First verify that this host adapter is completely free with no pending
2140
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2141
for (SDpnt = shpnt->host_queue; SDpnt;
2142
SDpnt = SDpnt->next) {
2143
if (SDpnt->host->hostt == tpnt
2144
&& SDpnt->host->hostt->module
2145
&& GET_USE_COUNT(SDpnt->host->hostt->module))
2148
* FIXME(eric) - We need to find a way to notify the
2149
* low level driver that we are shutting down - via the
2150
* special device entry that still needs to get added.
2152
* Is detach interface below good enough for this?
2158
* FIXME(eric) put a spinlock on this. We force all of the devices offline
2159
* to help prevent race conditions where other hosts/processors could try and
2160
* get in and queue a command.
2162
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2163
for (SDpnt = shpnt->host_queue; SDpnt;
2164
SDpnt = SDpnt->next) {
2165
if (SDpnt->host->hostt == tpnt)
2166
SDpnt->online = FALSE;
2171
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2172
if (shpnt->hostt != tpnt) {
2175
for (SDpnt = shpnt->host_queue; SDpnt;
2176
SDpnt = SDpnt->next) {
2178
* Loop over all of the commands associated with the device. If any of
2179
* them are busy, then set the state back to inactive and bail.
2181
for (SCpnt = SDpnt->device_queue; SCpnt;
2182
SCpnt = SCpnt->next) {
2183
online_status = SDpnt->online;
2184
SDpnt->online = FALSE;
2185
if (SCpnt->request.rq_status != RQ_INACTIVE) {
2186
printk(KERN_ERR "SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2187
SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2188
SCpnt->state, SCpnt->owner);
2189
for (SDpnt1 = shpnt->host_queue; SDpnt1;
2190
SDpnt1 = SDpnt1->next) {
2191
for (SCpnt = SDpnt1->device_queue; SCpnt;
2192
SCpnt = SCpnt->next)
2193
if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2194
SCpnt->request.rq_status = RQ_INACTIVE;
2196
SDpnt->online = online_status;
2197
printk(KERN_ERR "Device busy???\n");
2201
* No, this device is really free. Mark it as such, and
2204
SCpnt->state = SCSI_STATE_DISCONNECTING;
2205
SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2209
/* Next we detach the high level drivers from the Scsi_Device structures */
2211
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2212
if (shpnt->hostt != tpnt) {
2215
for (SDpnt = shpnt->host_queue; SDpnt;
2216
SDpnt = SDpnt->next) {
2217
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2219
(*sdtpnt->detach) (SDpnt);
2221
/* If something still attached, punt */
2222
if (SDpnt->attached) {
2223
printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
2226
#ifdef DEVFS_MUST_DIE
2227
devfs_unregister (SDpnt->de);
2234
* Next, kill the kernel error recovery thread for this host.
2236
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2237
if (shpnt->hostt == tpnt
2238
&& shpnt->hostt->use_new_eh_code
2239
&& shpnt->ehandler != NULL) {
2240
DECLARE_MUTEX_LOCKED(sem);
2242
shpnt->eh_notify = &sem;
2243
send_sig(SIGHUP, shpnt->ehandler, 1);
2245
shpnt->eh_notify = NULL;
2250
/* Next we free up the Scsi_Cmnd structures for this host */
2252
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2253
if (shpnt->hostt != tpnt) {
2256
for (SDpnt = shpnt->host_queue; SDpnt;
2257
SDpnt = shpnt->host_queue) {
2258
scsi_release_commandblocks(SDpnt);
2260
blk_cleanup_queue(&SDpnt->request_queue);
2261
/* Next free up the Scsi_Device structures for this host */
2262
shpnt->host_queue = SDpnt->next;
2263
kfree((char *) SDpnt);
2268
/* Next we go through and remove the instances of the individual hosts
2269
* that were detected */
2271
pcount0 = next_scsi_host;
2272
for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2274
if (shpnt->hostt != tpnt)
2276
pcount = next_scsi_host;
2277
/* Remove the /proc/scsi directory entry */
2278
sprintf(name,"%d",shpnt->host_no);
2279
#ifdef CONFIG_PROC_FS
2280
remove_proc_entry(name, tpnt->proc_dir);
2283
(*tpnt->release) (shpnt);
2285
/* This is the default case for the release function.
2286
* It should do the right thing for most correctly
2287
* written host adapters.
2290
free_irq(shpnt->irq, NULL);
2293
if (shpnt->dma_channel != 0xff)
2294
free_dma(shpnt->dma_channel);
2296
if (shpnt->io_port && shpnt->n_io_port)
2297
release_region(shpnt->io_port, shpnt->n_io_port);
2299
if (pcount == next_scsi_host)
2300
scsi_unregister(shpnt);
2305
* If there are absolutely no more hosts left, it is safe
2306
* to completely nuke the DMA pool. The resize operation will
2307
* do the right thing and free everything.
2310
scsi_resize_dma_pool();
2312
if (pcount0 != next_scsi_host)
2313
printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
2314
(next_scsi_host == 1) ? "" : "s");
2316
#if defined(USE_STATIC_SCSI_MEMORY)
2317
printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2318
(scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2319
(scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2320
(scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2324
* Remove it from the linked list and /proc if all
2325
* hosts were successfully removed (ie preset == 0)
2327
if (!tpnt->present) {
2328
Scsi_Host_Template **SHTp = &scsi_hosts;
2329
Scsi_Host_Template *SHT;
2331
while ((SHT = *SHTp) != NULL) {
2334
#ifdef CONFIG_PROC_FS
2335
remove_proc_entry(tpnt->proc_name, proc_scsi);
2357
static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2360
* This entry point should be called by a loadable module if it is trying
2361
* add a high level scsi driver to the system.
2363
static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2366
struct Scsi_Host *shpnt;
2367
int out_of_space = 0;
2372
scsi_register_device(tpnt);
2374
* First scan the devices that we know about, and see if we notice them.
2377
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2378
for (SDpnt = shpnt->host_queue; SDpnt;
2379
SDpnt = SDpnt->next) {
2381
SDpnt->detected = (*tpnt->detect) (SDpnt);
2386
* If any of the devices would match this driver, then perform the
2389
if (tpnt->init && tpnt->dev_noticed) {
2390
if ((*tpnt->init) ()) {
2391
for (shpnt = scsi_hostlist; shpnt;
2392
shpnt = shpnt->next) {
2393
for (SDpnt = shpnt->host_queue; SDpnt;
2394
SDpnt = SDpnt->next) {
2395
SDpnt->detected = 0;
2398
scsi_deregister_device(tpnt);
2404
* Now actually connect the devices to the new driver.
2406
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2407
for (SDpnt = shpnt->host_queue; SDpnt;
2408
SDpnt = SDpnt->next) {
2409
SDpnt->attached += SDpnt->detected;
2410
SDpnt->detected = 0;
2412
(*tpnt->attach) (SDpnt);
2414
* If this driver attached to the device, and don't have any
2415
* command blocks for this device, allocate some.
2417
if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
2418
SDpnt->online = TRUE;
2419
scsi_build_commandblocks(SDpnt);
2420
if (0 == SDpnt->has_cmdblocks)
2427
* This does any final handling that is required.
2429
if (tpnt->finish && tpnt->nr_dev)
2432
scsi_resize_dma_pool();
2436
scsi_unregister_device(tpnt); /* easiest way to clean up?? */
2442
static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
2445
struct Scsi_Host *shpnt;
2451
* If we are busy, this is not going to fly.
2453
if (GET_USE_COUNT(tpnt->module) != 0)
2457
* Next, detach the devices from the driver.
2460
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2461
for (SDpnt = shpnt->host_queue; SDpnt;
2462
SDpnt = SDpnt->next) {
2464
(*tpnt->detach) (SDpnt);
2465
if (SDpnt->attached == 0) {
2466
SDpnt->online = FALSE;
2469
* Nobody is using this device any more. Free all of the
2470
* command structures.
2472
scsi_release_commandblocks(SDpnt);
2477
* Extract the template from the linked list.
2479
scsi_deregister_device(tpnt);
2487
* Final cleanup for the driver is done in the driver sources in the
2499
/* This function should be called by drivers which needs to register
2500
* with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2501
* main device/hosts register function /mathiasen
2503
int scsi_register_module(int module_type, void *ptr)
2505
switch (module_type) {
2506
case MODULE_SCSI_HA:
2507
return scsi_register_host((Scsi_Host_Template *) ptr);
2509
/* Load upper level device handler of some kind */
2510
case MODULE_SCSI_DEV:
2512
if (scsi_hosts == NULL)
2513
request_module("scsi_hostadapter");
2515
return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2516
/* The rest of these are not yet implemented */
2518
/* Load constants.o */
2519
case MODULE_SCSI_CONST:
2521
/* Load specialized ioctl handler for some device. Intended for
2522
* cdroms that have non-SCSI2 audio command sets. */
2523
case MODULE_SCSI_IOCTL:
2530
/* Reverse the actions taken above
2532
int scsi_unregister_module(int module_type, void *ptr)
2536
switch (module_type) {
2537
case MODULE_SCSI_HA:
2538
retval = scsi_unregister_host((Scsi_Host_Template *) ptr);
2540
case MODULE_SCSI_DEV:
2541
retval = scsi_unregister_device((struct Scsi_Device_Template *)ptr);
2543
/* The rest of these are not yet implemented. */
2544
case MODULE_SCSI_CONST:
2545
case MODULE_SCSI_IOCTL:
2552
#ifdef CONFIG_PROC_FS
2554
* Function: scsi_dump_status
2556
* Purpose: Brain dump of scsi system, used for problem solving.
2558
* Arguments: level - used to indicate level of detail.
2560
* Notes: The level isn't used at all yet, but we need to find some way
2561
* of sensibly logging varying degrees of information. A quick one-line
2562
* display of each command, plus the status would be most useful.
2564
* This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
2565
* it all off if the user wants a lean and mean kernel. It would probably
2566
* also be useful to allow the user to specify one single host to be dumped.
2567
* A second argument to the function would be useful for that purpose.
2569
* FIXME - some formatting of the output into tables would be very handy.
2571
/*
 * scsi_dump_status -- diagnostic brain-dump of the SCSI subsystem (hosts,
 * per-device command state, and pending block-device requests), printed
 * via printk.  Compiled to a no-op unless CONFIG_SCSI_LOGGING is set.
 * The `level` argument is accepted but (per the header comment above)
 * not yet used.
 *
 * NOTE(review): garbled copy -- numeric junk lines interleaved and many
 * real lines dropped (local SCpnt/SDpnt/i declarations, several printk
 * arguments, loop braces, the do { } of the request-list walk).  Left
 * byte-identical; restore from an intact drivers/scsi/scsi.c.
 */
static void scsi_dump_status(int level)
2573
#ifdef CONFIG_SCSI_LOGGING /* { */
2575
struct Scsi_Host *shpnt;
2578
/* Pass 1: one summary line per registered host. */
printk(KERN_INFO "Dump of scsi host parameters:\n");
2580
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2581
printk(KERN_INFO " %d %d %d : %d %d\n",
2584
atomic_read(&shpnt->host_active),
2585
shpnt->host_blocked,
2586
shpnt->host_self_blocked);
2589
printk(KERN_INFO "\n\n");
2590
/* Pass 2: every command on every device queue of every host. */
printk(KERN_INFO "Dump of scsi command parameters:\n");
2591
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2592
printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
2593
for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2594
for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
2595
/* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
2596
printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
2599
SCpnt->host->host_no,
2604
kdevname(SCpnt->request.rq_dev),
2605
SCpnt->request.sector,
2606
SCpnt->request.nr_sectors,
2607
SCpnt->request.current_nr_sectors,
2608
SCpnt->request.rq_status,
2615
SCpnt->timeout_per_command,
2617
SCpnt->internal_timeout,
2620
SCpnt->sense_buffer[2],
2626
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2627
for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2628
/* Now dump the request lists for each block device */
2629
/* Pass 3: walk blk_dev[] and print any non-empty request queue. */
printk(KERN_INFO "Dump of pending block device requests\n");
2630
for (i = 0; i < MAX_BLKDEV; i++) {
2631
struct list_head * queue_head;
2633
queue_head = &blk_dev[i].request_queue.queue_head;
2634
if (!list_empty(queue_head)) {
2635
struct request *req;
2636
struct list_head * entry;
2638
printk(KERN_INFO "%d: ", i);
2639
entry = queue_head->next;
2641
req = blkdev_entry_to_request(entry);
2642
printk("(%s %d %ld %ld %ld) ",
2643
kdevname(req->rq_dev),
2647
req->current_nr_sectors);
2648
} while ((entry = entry->next) != queue_head);
2654
#endif /* CONFIG_SCSI_LOGGING */ /* } */
2656
#endif /* CONFIG_PROC_FS */
2658
/*
 * scsi_host_no_init -- parse the "scsihosts=" string (':' or ',' separated
 * driver names) and record each name with a sequential host number via
 * scsi_host_no_insert(), so hosts probe in the user-requested order.
 *
 * NOTE(review): garbled copy -- the outer while(str) loop, the `temp`
 * declaration, the separator-terminating assignment and the return are
 * missing.  Left byte-identical; restore from an intact scsi.c.
 */
static int __init scsi_host_no_init (char *str)
2660
static int next_no = 0;
2665
/* Scan forward to the next ':' or ',' separator (or end of string). */
while (*temp && (*temp != ':') && (*temp != ','))
2671
scsi_host_no_insert(str, next_no);
2678
/* Host-ordering string from the "scsihosts=" boot/module parameter;
 * consumed by scsi_host_no_init() during init_scsi(). */
static char *scsihosts;
2680
/* Expose scsihosts as a string ("s") module parameter. */
MODULE_PARM(scsihosts, "s");
2681
MODULE_DESCRIPTION("SCSI core");
2682
MODULE_LICENSE("GPL");
2685
int __init scsi_setup(char *str)
2691
__setup("scsihosts=", scsi_setup);
2694
/*
 * init_scsi -- module/subsystem initialization: set up the minimal DMA
 * pool, create /proc/scsi and /proc/scsi/scsi (when CONFIG_PROC_FS),
 * apply the user-supplied host ordering, and install the SCSI bottom-half
 * handler that completes commands.
 *
 * NOTE(review): garbled copy -- numeric junk lines interleaved; missing
 * lines include the opening brace, error returns after the printk error
 * messages, #endif lines for the CONFIG_PROC_FS/DEVFS_MUST_DIE regions,
 * and the final return.  Left byte-identical; restore from an intact
 * scsi.c before building.
 */
static int __init init_scsi(void)
2696
#ifdef CONFIG_PROC_FS
2697
struct proc_dir_entry *generic;
2700
printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
2702
/* Carve out the small DMA pool needed before any host is registered. */
if( scsi_init_minimal_dma_pool() != 0 )
2707
#ifdef CONFIG_PROC_FS
2709
* This makes /proc/scsi and /proc/scsi/scsi visible.
2711
proc_scsi = proc_mkdir("scsi", 0);
2713
printk (KERN_ERR "cannot init /proc/scsi\n");
2716
generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
2718
printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
2719
/* Roll back the directory if the info entry could not be created. */
remove_proc_entry("scsi", 0);
2722
generic->write_proc = proc_scsi_gen_write;
2725
#ifdef DEVFS_MUST_DIE
2726
scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
2729
/* Honour any "scsihosts=" ordering supplied by the user. */
printk(KERN_INFO "scsi: host order: %s\n", scsihosts);
2730
scsi_host_no_init (scsihosts);
2732
* This is where the processing takes place for most everything
2733
* when commands are completed.
2735
init_bh(SCSI_BH, scsi_bottom_half_handler);
2742
/*
 * exit_scsi -- module teardown: unregister the devfs directory (when
 * enabled), free the recorded host-name list, remove the /proc/scsi
 * entries, and release the DMA pool.
 *
 * NOTE(review): garbled copy -- the loop body freeing each Scsi_Host_Name
 * (shn2 bookkeeping, kfree calls), the #endif lines and the closing brace
 * are missing, plus interleaved numeric junk.  Left byte-identical.
 */
static void __exit exit_scsi(void)
2744
Scsi_Host_Name *shn, *shn2 = NULL;
2748
#ifdef DEVFS_MUST_DIE
2749
devfs_unregister (scsi_devfs_handle);
2751
/* Free the scsihosts name list built by scsi_host_no_init(). */
for (shn = scsi_host_no_list;shn;shn = shn->next) {
2761
#ifdef CONFIG_PROC_FS
2762
/* No, we're not here anymore. Don't show the /proc/scsi files. */
2763
remove_proc_entry ("scsi/scsi", 0);
2764
remove_proc_entry ("scsi", 0);
2768
* Free up the DMA pool.
2770
scsi_resize_dma_pool();
2774
/* Register subsystem entry/exit points with the module machinery. */
module_init(init_scsi);
2775
module_exit(exit_scsi);
2778
* Function: scsi_get_host_dev()
2780
* Purpose: Create a Scsi_Device that points to the host adapter itself.
2782
* Arguments: SHpnt - Host that needs a Scsi_Device
2784
* Lock status: None assumed.
2786
* Returns: The Scsi_Device or NULL
2790
/*
 * scsi_get_host_dev -- allocate and initialize a pseudo Scsi_Device that
 * represents the host adapter itself (id = host's this_id, queue depth 1),
 * builds its command blocks and request queue, and marks it online.
 * Returns the new Scsi_Device, or NULL on allocation failure (per the
 * header comment above; the failure path itself is missing from this
 * garbled copy).
 *
 * NOTE(review): garbled copy -- missing lines include the opening brace,
 * the GFP flags / NULL check after kmalloc, linking the device into
 * SHpnt->host_queue, and the final `return SDpnt;`.  Left byte-identical.
 */
Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
2792
Scsi_Device * SDpnt;
2795
* Attach a single Scsi_Device to the Scsi_Host - this should
2796
* be made to look like a "pseudo-device" that points to the
2797
* HA itself. For the moment, we include it at the head of
2798
* the host_queue itself - I don't think we want to show this
2799
* to the HA in select_queue_depths(), as this would probably confuse
2801
* Note - this device is not accessible from any high-level
2802
* drivers (including generics), which is probably not
2803
* optimal. We can add hooks later to attach
2805
SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
2810
/* Zero the whole structure, then fill in the pseudo-device fields. */
memset(SDpnt, 0, sizeof(Scsi_Device));
2812
SDpnt->host = SHpnt;
2813
SDpnt->id = SHpnt->this_id;
2815
SDpnt->queue_depth = 1;
2817
scsi_build_commandblocks(SDpnt);
2819
scsi_initialize_queue(SDpnt, SHpnt);
2821
SDpnt->online = TRUE;
2825
* Initialize the object that we will use to wait for command blocks.
2827
init_waitqueue_head(&SDpnt->scpnt_wait);
2833
* Function: scsi_free_host_dev()
2835
* Purpose: Create a Scsi_Device that points to the host adapter itself.
2837
* Arguments: SHpnt - Host that needs a Scsi_Device
2839
* Lock status: None assumed.
2845
/*
 * scsi_free_host_dev -- release the pseudo Scsi_Device created by
 * scsi_get_host_dev(): sanity-check it really is the host's own device,
 * tear down its request queue and command blocks.
 *
 * NOTE(review): garbled copy -- the opening brace, unlinking from the
 * host_queue, the kfree of SDpnt and the closing brace are missing.
 * Left byte-identical; restore from an intact scsi.c.
 */
void scsi_free_host_dev(Scsi_Device * SDpnt)
2847
/* Guard against freeing a real device by mistake: the pseudo-device's
 * id must match the host's own SCSI id. */
if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2849
panic("Attempt to delete wrong device\n");
2852
blk_cleanup_queue(&SDpnt->request_queue);
2855
* We only have a single SCpnt attached to this device. Free
2858
scsi_release_commandblocks(SDpnt);
2863
* Function: scsi_reset_provider_done_command
2865
* Purpose: Dummy done routine.
2867
* Notes: Some low level drivers will call scsi_done and end up here,
2868
* others won't bother.
2869
* We don't want the bogus command used for the bus/device
2870
* reset to find its way into the mid-layer so we intercept
2874
/*
 * scsi_reset_provider_done_command -- intentionally-empty completion
 * routine installed on the synthetic command used by
 * scsi_reset_provider(), so low-level drivers that call scsi_done on it
 * do not feed the bogus command back into the mid-layer (see the header
 * comment above).
 *
 * NOTE(review): the `static void` return-type line and the empty braces
 * were dropped by the garbled extraction of this file; restored to match
 * the canonical Linux 2.4 drivers/scsi/scsi.c -- verify against upstream.
 */
static void
scsi_reset_provider_done_command(Scsi_Cmnd *SCpnt)
{
}
* Function: scsi_reset_provider
2881
* Purpose: Send requested reset to a bus or device at any phase.
2883
* Arguments: device - device to send reset to
2884
* flag - reset type (see scsi.h)
2886
* Returns: SUCCESS/FAILURE.
2888
* Notes: This is used by the SCSI Generic driver to provide
2889
* Bus/Device reset capability.
2892
/*
 * scsi_reset_provider -- build a synthetic, fully-initialized Scsi_Cmnd
 * on the stack for the given device and hand it to the error-handling
 * code to perform the requested bus/device reset: the new EH path
 * (scsi_new_reset) when the host template uses it, otherwise the old
 * path (scsi_old_reset) under io_request_lock.  Used by the SCSI
 * Generic driver (see header comment above); returns SUCCESS/FAILURE.
 *
 * NOTE(review): garbled copy -- missing lines include the `int` return
 * type, the opening brace, the `int rtn;` declaration, the else branch
 * structure around the old-EH path, and the final `return rtn;`.
 * Left byte-identical; restore from an intact scsi.c.
 */
scsi_reset_provider(Scsi_Device *dev, int flag)
2894
/* Stack-allocated command: never enters the normal free lists. */
Scsi_Cmnd SC, *SCpnt = &SC;
2897
memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
2898
SCpnt->host = dev->host;
2899
SCpnt->device = dev;
2900
SCpnt->target = dev->id;
2901
SCpnt->lun = dev->lun;
2902
SCpnt->channel = dev->channel;
2903
SCpnt->request.rq_status = RQ_SCSI_BUSY;
2904
SCpnt->request.waiting = NULL;
2906
SCpnt->old_use_sg = 0;
2907
SCpnt->old_cmd_len = 0;
2908
SCpnt->underflow = 0;
2909
SCpnt->transfersize = 0;
2911
SCpnt->serial_number = 0;
2912
SCpnt->serial_number_at_timeout = 0;
2913
SCpnt->host_scribble = NULL;
2915
SCpnt->state = SCSI_STATE_INITIALIZING;
2916
SCpnt->owner = SCSI_OWNER_MIDLEVEL;
2918
memset(&SCpnt->cmnd, '\0', sizeof(SCpnt->cmnd));
2920
/* Dummy completion routine: keeps LLD scsi_done calls out of the
 * mid-layer (see scsi_reset_provider_done_command above). */
SCpnt->scsi_done = scsi_reset_provider_done_command;
2922
SCpnt->reset_chain = NULL;
2924
SCpnt->buffer = NULL;
2926
SCpnt->request_buffer = NULL;
2927
SCpnt->request_bufflen = 0;
2929
SCpnt->internal_timeout = NORMAL_TIMEOUT;
2930
SCpnt->abort_reason = DID_ABORT;
2934
SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
2935
SCpnt->sc_request = NULL;
2936
SCpnt->sc_magic = SCSI_CMND_MAGIC;
2939
* Sometimes the command can get back into the timer chain,
2940
* so use the pid as an identifier.
2944
/* Dispatch to the new or old error-handling strategy. */
if (dev->host->hostt->use_new_eh_code) {
2945
rtn = scsi_new_reset(SCpnt, flag);
2947
unsigned long flags;
2949
/* Old EH path expects to run under io_request_lock. */
spin_lock_irqsave(&io_request_lock, flags);
2950
rtn = scsi_old_reset(SCpnt, flag);
2951
spin_unlock_irqrestore(&io_request_lock, flags);
2955
/* Make sure the synthetic command is off the timer chain before the
 * stack frame holding it disappears. */
scsi_delete_timer(SCpnt);
2960
* Overrides for Emacs so that we follow Linus's tabbing style.
2961
* Emacs will notice this stuff at the end of the file and automatically
2962
* adjust the settings for this buffer only. This must remain at the end
2964
* ---------------------------------------------------------------------------
2967
* c-brace-imaginary-offset: 0
2968
* c-brace-offset: -4
2969
* c-argdecl-indent: 4
2970
* c-label-offset: -4
2971
* c-continued-statement-offset: 4
2972
* c-continued-brace-offset: 0
2973
* indent-tabs-mode: nil