2
* Adaptec AAC series RAID controller driver
3
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
5
* based on the old aacraid driver that is..
7
* Adaptec aacraid device driver for Linux.
9
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
11
* This program is free software; you can redistribute it and/or modify
12
* it under the terms of the GNU General Public License as published by
13
* the Free Software Foundation; either version 2, or (at your option)
16
* This program is distributed in the hope that it will be useful,
17
* but WITHOUT ANY WARRANTY; without even the implied warranty of
18
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
* GNU General Public License for more details.
21
* You should have received a copy of the GNU General Public License
22
* along with this program; see the file COPYING. If not, write to
23
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
28
* Abstract: Contain all routines that are required for FSA host/adapter
34
#include <linux/config.h>
35
#include <linux/kernel.h>
36
#include <linux/init.h>
37
#include <linux/types.h>
38
#include <linux/sched.h>
39
#include <linux/pci.h>
40
#include <linux/spinlock.h>
41
#include <linux/slab.h>
42
/*#include <linux/completion.h>*/
43
/*#include <asm/semaphore.h>*/
44
#include <linux/blk.h>
45
#include <asm/uaccess.h>
47
#include <xeno/interrupt.h>
48
#include <xeno/delay.h>
56
* fib_map_alloc - allocate the fib objects
57
* @dev: Adapter to allocate for
59
* Allocate and map the shared PCI space for the FIB blocks used to
60
* talk to the Adaptec firmware.
63
static int fib_map_alloc(struct aac_dev *dev)
65
if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
71
* fib_map_free - free the fib objects
72
* @dev: Adapter to free
74
* Free the PCI mappings and the memory allocated for FIB blocks
78
void fib_map_free(struct aac_dev *dev)
80
pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
84
* fib_setup - setup the fibs
85
* @dev: Adapter to set up
87
* Allocate the PCI space for the fibs, map it and then intialise the
88
* fib area, the unmapped fib data and also the free list
91
int fib_setup(struct aac_dev * dev)
94
struct hw_fib *hw_fib_va;
98
if(fib_map_alloc(dev)<0)
101
hw_fib_va = dev->hw_fib_va;
102
hw_fib_pa = dev->hw_fib_pa;
103
memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
105
* Initialise the fibs
107
for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
110
fibptr->hw_fib = hw_fib_va;
111
fibptr->data = (void *) fibptr->hw_fib->data;
112
fibptr->next = fibptr+1; /* Forward chain the fibs */
114
init_MUTEX_LOCKED(&fibptr->event_wait);
116
spin_lock_init(&fibptr->event_lock);
117
hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
118
hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
119
fibptr->hw_fib_pa = hw_fib_pa;
120
hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
121
hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib);
124
* Add the fib chain to the free list
126
dev->fibs[AAC_NUM_FIB-1].next = NULL;
128
* Enable this to debug out of queue space
130
dev->free_fib = &dev->fibs[0];
135
* fib_alloc - allocate a fib
136
* @dev: Adapter to allocate the fib for
138
* Allocate a fib from the adapter fib pool. If the pool is empty we
139
* wait for fibs to become free.
142
struct fib * fib_alloc(struct aac_dev *dev)
147
spin_lock_irqsave(&dev->fib_lock, flags);
148
fibptr = dev->free_fib;
151
dev->free_fib = fibptr->next;
152
spin_unlock_irqrestore(&dev->fib_lock, flags);
154
* Set the proper node type code and node byte size
156
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
157
fibptr->size = sizeof(struct fib);
159
* Null out fields that depend on being zero at the start of
162
fibptr->hw_fib->header.XferState = cpu_to_le32(0);
163
fibptr->callback = NULL;
164
fibptr->callback_data = NULL;
170
* fib_free - free a fib
171
* @fibptr: fib to free up
173
* Frees up a fib and places it on the appropriate queue
174
* (either free or timed out)
177
void fib_free(struct fib * fibptr)
181
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
183
if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
184
aac_config.fib_timeouts++;
185
fibptr->next = fibptr->dev->timeout_fib;
186
fibptr->dev->timeout_fib = fibptr;
188
if (fibptr->hw_fib->header.XferState != 0) {
189
printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
190
(void*)fibptr, fibptr->hw_fib->header.XferState);
192
fibptr->next = fibptr->dev->free_fib;
193
fibptr->dev->free_fib = fibptr;
195
spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
199
* fib_init - initialise a fib
200
* @fibptr: The fib to initialize
202
* Set up the generic fib fields ready for use
205
void fib_init(struct fib *fibptr)
207
struct hw_fib *hw_fib = fibptr->hw_fib;
209
hw_fib->header.StructType = FIB_MAGIC;
210
hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
211
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
212
hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
213
hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
214
hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
218
* fib_deallocate - deallocate a fib
219
* @fibptr: fib to deallocate
221
* Will deallocate and return to the free pool the FIB pointed to by the
225
void fib_dealloc(struct fib * fibptr)
227
struct hw_fib *hw_fib = fibptr->hw_fib;
228
if(hw_fib->header.StructType != FIB_MAGIC)
230
hw_fib->header.XferState = cpu_to_le32(0);
/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have a
 *	knowledge of how these queues are implemented.
 */
241
* aac_get_entry - get a queue entry
244
* @entry: Entry return
245
* @index: Index return
246
* @nonotify: notification control
248
* With a priority the routine returns a queue entry if the queue has free entries. If the queue
249
* is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
253
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
255
struct aac_queue * q;
258
* All of the queues wrap when they reach the end, so we check
259
* to see if they have reached the end and if they have we just
260
* set the index back to zero. This is a wrap. You could or off
261
* the high bits in all updates but this is a bit faster I think.
264
q = &dev->queues->queue[qid];
266
*index = le32_to_cpu(*(q->headers.producer));
267
if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
270
if (qid == AdapHighCmdQueue) {
271
if (*index >= ADAP_HIGH_CMD_ENTRIES)
273
} else if (qid == AdapNormCmdQueue) {
274
if (*index >= ADAP_NORM_CMD_ENTRIES)
275
*index = 0; /* Wrap to front of the Producer Queue. */
277
else if (qid == AdapHighRespQueue)
279
if (*index >= ADAP_HIGH_RESP_ENTRIES)
282
else if (qid == AdapNormRespQueue)
284
if (*index >= ADAP_NORM_RESP_ENTRIES)
285
*index = 0; /* Wrap to front of the Producer Queue. */
289
if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
290
printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", qid, q->numpending);
293
*entry = q->base + *index;
299
* aac_queue_get - get the next free QE
301
* @index: Returned index
302
* @priority: Priority of fib
303
* @fib: Fib to associate with the queue entry
304
* @wait: Wait if queue full
305
* @fibptr: Driver fib object to go with fib
306
* @nonotify: Don't notify the adapter
308
* Gets the next free QE off the requested priorty adapter command
309
* queue and associates the Fib with the QE. The QE represented by
310
* index is ready to insert on the queue when this routine returns
314
static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
316
struct aac_entry * entry = NULL;
318
struct aac_queue * q = &dev->queues->queue[qid];
320
spin_lock_irqsave(q->lock, q->SavedIrql);
322
if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
324
/* if no entries wait for some if caller wants to */
325
while (!aac_get_entry(dev, qid, &entry, index, nonotify))
327
printk(KERN_ERR "GetEntries failed\n");
330
* Setup queue entry with a command, status and fib mapped
332
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
335
else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
337
while(!aac_get_entry(dev, qid, &entry, index, nonotify))
339
/* if no entries wait for some if caller wants to */
342
* Setup queue entry with command, status and fib mapped
344
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
345
entry->addr = hw_fib->header.SenderFibAddress;
346
/* Restore adapters pointer to the FIB */
347
hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
351
* If MapFib is true than we need to map the Fib and put pointers
352
* in the queue entry.
355
entry->addr = fibptr->hw_fib_pa;
361
* aac_insert_entry - insert a queue entry
363
* @index: Index of entry to insert
365
* @nonotify: Suppress adapter notification
367
* Gets the next free QE off the requested priorty adapter command
368
* queue and associates the Fib with the QE. The QE represented by
369
* index is ready to insert on the queue when this routine returns
373
static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
375
struct aac_queue * q = &dev->queues->queue[qid];
379
*(q->headers.producer) = cpu_to_le32(index + 1);
380
spin_unlock_irqrestore(q->lock, q->SavedIrql);
382
if (qid == AdapHighCmdQueue ||
383
qid == AdapNormCmdQueue ||
384
qid == AdapHighRespQueue ||
385
qid == AdapNormRespQueue)
388
aac_adapter_notify(dev, qid);
391
printk("Suprise insert!\n");
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */
404
* fib_send - send a fib to the adapter
405
* @command: Command to send
407
* @size: Size of fib data area
408
* @priority: Priority of Fib
409
* @wait: Async/sync select
410
* @reply: True if a reply is wanted
411
* @callback: Called with reply
412
* @callback_data: Passed to callback
414
* Sends the requested FIB to the adapter and optionally will wait for a
415
* response FIB. If the caller does not wish to wait for a response than
416
* an event to wait on must be supplied. This event will be set when a
417
* response FIB is received from the adapter.
420
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
424
struct aac_dev * dev = fibptr->dev;
425
unsigned long nointr = 0;
426
struct hw_fib * hw_fib = fibptr->hw_fib;
427
struct aac_queue * q;
428
unsigned long flags = 0;
430
if (!(le32_to_cpu(hw_fib->header.XferState) & HostOwned))
433
* There are 5 cases with the wait and reponse requested flags.
434
* The only invalid cases are if the caller requests to wait and
435
* does not request a response and if the caller does not want a
436
* response and the Fibis not allocated from pool. If a response
437
* is not requesed the Fib will just be deallocaed by the DPC
438
* routine when the response comes back from the adapter. No
439
* further processing will be done besides deleting the Fib. We
440
* will have a debug mode where the adapter can notify the host
441
* it had a problem and the host can log that fact.
443
if (wait && !reply) {
445
} else if (!wait && reply) {
446
hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
447
FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
448
} else if (!wait && !reply) {
449
hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
450
FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
451
} else if (wait && reply) {
452
hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
453
FIB_COUNTER_INCREMENT(aac_config.NormalSent);
456
* Map the fib into 32bits by using the fib number
459
// hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1;
460
hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa);
461
hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
463
* Set FIB state to indicate where it came from and if we want a
464
* response from the adapter. Also load the command from the
467
* Map the hw fib pointer as a 32bit value
469
hw_fib->header.Command = cpu_to_le16(command);
470
hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
471
fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
473
* Set the size of the Fib we want to send to the adapter
475
hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
476
if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
480
* Get a queue entry connect the FIB to it and send an notify
481
* the adapter a command is ready.
483
if (priority == FsaHigh) {
484
hw_fib->header.XferState |= cpu_to_le32(HighPriority);
485
qid = AdapHighCmdQueue;
487
hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
488
qid = AdapNormCmdQueue;
490
q = &dev->queues->queue[qid];
493
spin_lock_irqsave(&fibptr->event_lock, flags);
494
if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
496
dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
497
dprintk((KERN_DEBUG "Fib contents:.\n"));
498
dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
499
dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
500
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
501
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
502
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
504
* Fill in the Callback and CallbackContext if we are not
508
fibptr->callback = callback;
509
fibptr->callback_data = callback_data;
511
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
512
list_add_tail(&fibptr->queue, &q->pendingq);
518
if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
521
* If the caller wanted us to wait for response wait now.
525
spin_unlock_irqrestore(&fibptr->event_lock, flags);
526
while (!fibptr->done)
527
aac_command_thread(dev);
528
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
534
* If the user does not want a response than return success otherwise
544
* aac_consumer_get - get the top of the queue
547
* @entry: Return entry
549
* Will return a pointer to the entry on the top of the queue requested that
550
* we are a consumer of, and return the address of the queue entry. It does
551
* not change the state of the queue.
554
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
558
if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
562
* The consumer index must be wrapped if we have reached
563
* the end of the queue, else we just use the entry
564
* pointed to by the header index
566
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
569
index = le32_to_cpu(*q->headers.consumer);
570
*entry = q->base + index;
576
int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
578
return (le32_to_cpu(*q->headers.producer) != le32_to_cpu(*q->headers.consumer));
583
* aac_consumer_free - free consumer entry
588
* Frees up the current top of the queue we are a consumer of. If the
589
* queue was full notify the producer that the queue is no longer full.
592
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
597
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
600
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
601
*q->headers.consumer = cpu_to_le32(1);
603
*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
608
case HostNormCmdQueue:
609
notify = HostNormCmdNotFull;
611
case HostHighCmdQueue:
612
notify = HostHighCmdNotFull;
614
case HostNormRespQueue:
615
notify = HostNormRespNotFull;
617
case HostHighRespQueue:
618
notify = HostHighRespNotFull;
624
aac_adapter_notify(dev, notify);
629
* fib_adapter_complete - complete adapter issued fib
630
* @fibptr: fib to complete
633
* Will do all necessary work to complete a FIB that was sent from
637
int fib_adapter_complete(struct fib * fibptr, unsigned short size)
639
struct hw_fib * hw_fib = fibptr->hw_fib;
640
struct aac_dev * dev = fibptr->dev;
641
unsigned long nointr = 0;
642
if (le32_to_cpu(hw_fib->header.XferState) == 0)
645
* If we plan to do anything check the structure type first.
647
if ( hw_fib->header.StructType != FIB_MAGIC ) {
651
* This block handles the case where the adapter had sent us a
652
* command and we have finished processing the command. We
653
* call completeFib when we are done processing the command
654
* and want to send a response back to the adapter. This will
655
* send the completed cdb to the adapter.
657
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
658
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
659
if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
663
size += sizeof(struct aac_fibhdr);
664
if (size > le16_to_cpu(hw_fib->header.SenderSize))
666
hw_fib->header.Size = cpu_to_le16(size);
668
if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
671
if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
674
else if (hw_fib->header.XferState & NormalPriority)
679
size += sizeof(struct aac_fibhdr);
680
if (size > le16_to_cpu(hw_fib->header.SenderSize))
682
hw_fib->header.Size = cpu_to_le16(size);
684
if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
686
if (aac_insert_entry(dev, index, AdapNormRespQueue,
687
(nointr & (int)aac_config.irq_mod)) != 0)
694
printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
701
* fib_complete - fib completion handler
702
* @fib: FIB to complete
704
* Will do all necessary work to complete a FIB.
707
int fib_complete(struct fib * fibptr)
709
struct hw_fib * hw_fib = fibptr->hw_fib;
712
* Check for a fib which has already been completed
715
if (hw_fib->header.XferState == cpu_to_le32(0))
718
* If we plan to do anything check the structure type first.
721
if (hw_fib->header.StructType != FIB_MAGIC)
724
* This block completes a cdb which orginated on the host and we
725
* just need to deallocate the cdb or reinit it. At this point the
726
* command is complete that we had sent to the adapter and this
727
* cdb could be reused.
729
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
730
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
734
else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
737
* This handles the case when the host has aborted the I/O
738
* to the adapter because the adapter is not responding
741
} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
750
* aac_printf - handle printf from firmware
754
* Print a message passed to us by the controller firmware on the
758
void aac_printf(struct aac_dev *dev, u32 val)
760
int length = val & 0xffff;
761
int level = (val >> 16) & 0xffff;
762
char *cp = dev->printfbuf;
765
* The size of the printfbuf is set in port.c
766
* There is no variable or define for it
772
if (level == LOG_HIGH_ERROR)
773
printk(KERN_WARNING "aacraid:%s", cp);
775
printk(KERN_INFO "aacraid:%s", cp);
781
* aac_handle_aif - Handle a message from the firmware
782
* @dev: Which adapter this fib is from
783
* @fibptr: Pointer to fibptr from adapter
785
* This routine handles a driver notify fib from the adapter and
786
* dispatches it to the appropriate routine for handling.
789
#define CONTAINER_TO_BUS(cont) (0)
790
#define CONTAINER_TO_TARGET(cont) ((cont))
791
#define CONTAINER_TO_LUN(cont) (0)
793
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
796
struct hw_fib * hw_fib = fibptr->hw_fib;
797
struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
802
/* Sniff for container changes */
803
dprintk ((KERN_INFO "AifCmdDriverNotify=%x\n", le32_to_cpu(*(u32 *)aifcmd->data)));
804
switch (le32_to_cpu(*(u32 *)aifcmd->data)) {
805
case AifDenMorphComplete:
806
case AifDenVolumeExtendComplete:
807
case AifEnContainerChange: /* Not really a driver notify Event */
810
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
811
dprintk ((KERN_INFO "container=%d(%d,%d,%d,%d) ",
813
(dev && dev->scsi_host_ptr)
814
? dev->scsi_host_ptr->host_no
816
CONTAINER_TO_BUS(container),
817
CONTAINER_TO_TARGET(container),
818
CONTAINER_TO_LUN(container)));
821
* Find the Scsi_Device associated with the SCSI address,
822
* and mark it as changed, invalidating the cache. This deals
823
* with changes to existing device IDs.
826
if ((dev != (struct aac_dev *)NULL)
827
&& (dev->scsi_host_ptr != (struct Scsi_Host *)NULL)) {
828
Scsi_Device * device;
830
for (device = dev->scsi_host_ptr->host_queue;
831
device != (Scsi_Device *)NULL;
832
device = device->next) {
834
"aifd: device (%d,%d,%d,%d)?\n",
835
dev->scsi_host_ptr->host_no,
839
if ((device->channel == CONTAINER_TO_BUS(container))
840
&& (device->id == CONTAINER_TO_TARGET(container))
841
&& (device->lun == CONTAINER_TO_LUN(container))) {
842
busy |= (device->access_count != 0);
844
device->removable = TRUE;
849
dprintk (("busy=%d\n", busy));
853
* scan_scsis(dev->scsi_host_ptr, 1,
854
* CONTAINER_TO_BUS(container),
855
* CONTAINER_TO_TARGET(container),
856
* CONTAINER_TO_LUN(container));
858
* is not exported as accessible, so we need to go around it
859
* another way. So, we look for the "proc/scsi/scsi" entry in
860
* the proc filesystem (using proc_scsi as a shortcut) and send
861
* it a message. This deals with new devices that have
862
* appeared. If the device has gone offline, scan_scsis will
863
* also discover this, but we do not want the device to
864
* go away. We need to check the access_count for the
865
* device since we are not wanting the devices to go away.
867
if (busy == 0 && proc_scsi != NULL) {
868
struct proc_dir_entry * entry;
870
dprintk((KERN_INFO "proc_scsi=%p ", proc_scsi));
871
for (entry = proc_scsi->subdir; entry != (struct proc_dir_entry *)NULL; entry = entry->next) {
872
dprintk(("\"%.*s\"[%d]=%x ", entry->namelen,
873
entry->name, entry->namelen, entry->low_ino));
874
if ((entry->low_ino != 0) && (entry->namelen == 4) && (memcmp ("scsi", entry->name, 4) == 0)) {
875
dprintk(("%p->write_proc=%p ", entry, entry->write_proc));
876
if (entry->write_proc != (int (*)(struct file *, const char *, unsigned long, void *))NULL) {
881
"scsi add-single-device %d %d %d %d\n",
882
dev->scsi_host_ptr->host_no,
883
CONTAINER_TO_BUS(container),
884
CONTAINER_TO_TARGET(container),
885
CONTAINER_TO_LUN(container));
886
length = strlen (buffer);
887
dprintk((KERN_INFO "echo %.*s > /proc/scsi/scsi\n", length-1, buffer));
890
length = entry->write_proc(NULL, buffer, length, NULL);
892
dprintk((KERN_INFO "returns %d\n", length));
903
* aac_command_thread - command processing thread
904
* @dev: Adapter to monitor
906
* Waits on the commandready event in it's queue. When the event gets set
907
* it will pull FIBs off it's queue. It will continue to pull FIBs off
908
* until the queue is empty. When the queue is empty it will wait for
911
void aac_command_thread(struct aac_dev * dev)
913
struct hw_fib *hw_fib, *hw_newfib;
914
struct fib *fib, *newfib;
915
struct aac_queue_block *queues = dev->queues;
916
struct aac_fib_context *fibctx;
918
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
920
spin_lock_irqsave(&lock, flags);
923
spin_lock(queues->queue[HostNormCmdQueue].lock);
924
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
925
struct list_head *entry;
926
struct aac_aifcmd * aifcmd;
928
entry = queues->queue[HostNormCmdQueue].cmdq.next;
931
spin_unlock(queues->queue[HostNormCmdQueue].lock);
932
fib = list_entry(entry, struct fib, fiblink);
934
* We will process the FIB here or pass it to a
935
* worker thread that is TBD. We Really can't
936
* do anything at this point since we don't have
937
* anything defined for this thread to do.
939
hw_fib = fib->hw_fib;
941
memset(fib, 0, sizeof(struct fib));
942
fib->type = FSAFS_NTC_FIB_CONTEXT;
943
fib->size = sizeof( struct fib );
944
fib->hw_fib = hw_fib;
945
fib->data = hw_fib->data;
948
* We only handle AifRequest fibs from the adapter.
950
aifcmd = (struct aac_aifcmd *) hw_fib->data;
951
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
952
/* Handle Driver Notify Events */
953
aac_handle_aif(dev, fib);
954
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
955
fib_adapter_complete(fib, sizeof(u32));
957
struct list_head *entry;
958
/* The u32 here is important and intended. We are using
959
32bit wrapping time to fit the adapter field */
961
u32 time_now, time_last;
965
if (aifcmd->command == cpu_to_le32(AifCmdEventNotify))
966
aac_handle_aif(dev, fib);
968
time_now = jiffies/HZ;
970
spin_lock_irqsave(&dev->fib_lock, flagv);
971
entry = dev->fib_list.next;
973
* For each Context that is on the
974
* fibctxList, make a copy of the
975
* fib, and then set the event to wake up the
976
* thread that is waiting for it.
978
while (entry != &dev->fib_list) {
982
fibctx = list_entry(entry, struct aac_fib_context, next);
984
* Check if the queue is getting
987
if (fibctx->count > 20)
990
* It's *not* jiffies folks,
991
* but jiffies / HZ, so do not
994
time_last = fibctx->jiffies;
996
* Has it been > 2 minutes
997
* since the last read off
1000
if ((time_now - time_last) > 120) {
1001
entry = entry->next;
1002
aac_close_fib_context(dev, fibctx);
1007
* Warning: no sleep allowed while
1010
hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1011
newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
1012
if (newfib && hw_newfib) {
1014
* Make the copy of the FIB
1015
* FIXME: check if we need to fix other fields up
1017
memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1018
memcpy(newfib, fib, sizeof(struct fib));
1019
newfib->hw_fib = hw_newfib;
1021
* Put the FIB onto the
1024
list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1027
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1033
entry = entry->next;
1036
* Set the status of this FIB
1038
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
1039
fib_adapter_complete(fib, sizeof(u32));
1040
spin_unlock_irqrestore(&dev->fib_lock, flagv);
1042
spin_lock(queues->queue[HostNormCmdQueue].lock);
1046
* There are no more AIF's
1048
spin_unlock(queues->queue[HostNormCmdQueue].lock);
1051
spin_unlock_irqrestore(&lock, flags);