/*
 *  scsi_merge.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 *      Support for dynamic DMA mapping: Jakub Jelinek (jakub@redhat.com).
 */

/*
 * This file contains queue management functions that are used by SCSI.
 * Typically this is used for several purposes.  First, we need to ensure
 * that commands do not grow so large that they cannot be handled all at
 * once by a host adapter.  The various flavors of merge functions included
 * here serve this purpose.
 *
 * Note that it would be quite trivial to allow the low-level driver the
 * flexibility to define its own queue handling functions.  For the time
 * being, the hooks are not present.  Right now we are just using the
 * data in the host template as an indicator of how we should be handling
 * queues, and we select routines that are optimized for that purpose.
 *
 * Some hosts do not impose any restrictions on the size of a request.
 * In such cases none of the merge functions in this file are called,
 * and we allow ll_rw_blk to merge requests in the default manner.
 * This isn't guaranteed to be optimal, but it should be pretty darned
 * good.  If someone comes up with ideas of better ways of managing queues
 * to improve on the default behavior, then certainly fit it into this
 * scheme in whatever manner makes the most sense.  Please note that
 * since each device has its own queue, we have considerable flexibility
 * in queue management.
 */
#define __NO_VERSION__
#include <xeno/config.h>
#include <xeno/module.h>

#include <xeno/sched.h>
#include <xeno/timer.h>
/* #include <xeno/string.h> */
/* #include <xeno/slab.h> */
/* #include <xeno/ioport.h> */
/* #include <xeno/kernel.h> */
/* #include <xeno/stat.h> */
/* #include <xeno/interrupt.h> */
/* #include <xeno/delay.h> */
/* #include <xeno/smp_lock.h> */

#define __KERNEL_SYSCALLS__

/* #include <xeno/unistd.h> */

#include <asm/system.h>
#include <asm/io.h>	/* virt_to_phys() */

#include "scsi.h"
#include "hosts.h"
#include "constants.h"
#include <scsi/scsi_ioctl.h>

/*
 * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
 * Ultimately we should get away from using a dedicated DMA bounce buffer
 * pool, and we should instead try to use kmalloc().  If we can
 * eliminate this pool, then this restriction would no longer be needed.
 */
#define DMA_SEGMENT_SIZE_LIMITED
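/*
 * Illustration of the restriction above (assumes PAGE_SIZE == 4096 and
 * 1 KB buffer_heads): a run of contiguous buffers that needs ISA bouncing
 * can be clustered at most four deep, because each bounced segment must
 * come from a single scsi_malloc() allocation of PAGE_SIZE or less.  The
 * PAGE_SIZE checks in __count_segments() and in the merge functions below
 * are what enforce this.
 */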
#ifdef CONFIG_SCSI_DEBUG_QUEUES
/*
 * Enable a bunch of additional consistency checking.  Turn this off
 * if you are benchmarking.
 */
static int dump_stats(struct request *req,
		      int use_clustering,
		      int dma_host,
		      int segments)
{
	struct buffer_head *bh;

	/*
	 * Dump the information that we have.  We know we have an
	 * inconsistency.
	 */
	printk("nr_segments is %x\n", req->nr_segments);
	printk("counted segments is %x\n", segments);
	printk("Flags %d %d\n", use_clustering, dma_host);
	for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext)
	{
		printk("Segment 0x%p, blocks %d, addr 0x%lx\n",
		       bh,
		       bh->b_size >> 9,
		       virt_to_phys(bh->b_data - 1));
	}
	panic("Ththththaats all folks.  Too dangerous to continue.\n");
}
/*
 * Simple sanity check that we will use for the first go around
 * in order to ensure that we are doing the counting correctly.
 * This can be removed for optimization.
 */
#define SANITY_CHECK(req, _CLUSTER, _DMA)					\
    if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) )	\
    {										\
	printk("Incorrect segment count at 0x%p", current_text_addr());	\
	dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
    }
#else
#define SANITY_CHECK(req, _CLUSTER, _DMA)
#endif
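/*
 * For example, the stub generated by MERGEFCT(scsi_back_merge_fn_c, back,
 * 1, 0) below begins with SANITY_CHECK(req, 1, 0): with
 * CONFIG_SCSI_DEBUG_QUEUES enabled, every merge attempt recounts the
 * segments and panics via dump_stats() on a mismatch; otherwise the macro
 * compiles away to nothing.
 */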
static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
{
	int jj;
	struct scatterlist *sgpnt;
	void **bbpnt;
	int consumed = 0;

	sgpnt = (struct scatterlist *) SCpnt->request_buffer;
	bbpnt = SCpnt->bounce_buffers;

	/*
	 * Now print out a bunch of stats.  First, start with the request
	 * size.
	 */
	printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
	printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
	printk("request_bufflen:%d\n", SCpnt->request_bufflen);

	/*
	 * Now dump the scatter-gather table, up to the point of failure.
	 */
	for (jj = 0; jj < SCpnt->use_sg; jj++)
	{
		printk("[%d]\tlen:%d\taddr:%p\tbounce:%p\n",
		       jj,
		       sgpnt[jj].length,
		       sgpnt[jj].address,
		       (bbpnt ? bbpnt[jj] : NULL));
		if (bbpnt && bbpnt[jj])
			consumed += sgpnt[jj].length;
	}
	printk("Total %d sectors consumed\n", consumed);
	panic("DMA pool exhausted");
}

#define CLUSTERABLE_DEVICE(SH,SD) (SH->use_clustering)
/*
 * This entire source file deals with the new queueing code.
 */

/*
 * Function:    __count_segments()
 *
 * Purpose:     Prototype for queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              use_clustering - 1 if this host wishes to use clustering
 *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
 *                      expose all of the address lines, so that DMA cannot
 *                      be done from an arbitrary address).
 *              remainder - used to track the residual size of the last
 *                      segment.  Comes in handy when we want to limit the
 *                      size of bounce buffer segments to PAGE_SIZE.
 *
 * Returns:     Count of the number of SG segments for the request.
 *
 * Notes:       This is only used for diagnostic purposes.
 */
__inline static int __count_segments(struct request *req,
				     int use_clustering,
				     int dma_host,
				     int * remainder)
{
	int ret = 1;
	int reqsize = 0;
	struct buffer_head *bh;
	struct buffer_head *bhnext;

	if( remainder != NULL ) {
		reqsize = *remainder;
	}

	/*
	 * Add in the size increment for the first buffer.
	 */
	bh = req->bh;
#ifdef DMA_SEGMENT_SIZE_LIMITED
	if( reqsize + bh->b_size > PAGE_SIZE ) {
		ret++;
		reqsize = bh->b_size;
	} else {
		reqsize += bh->b_size;
	}
#else
	reqsize += bh->b_size;
#endif

	for (bh = req->bh, bhnext = bh->b_reqnext;
	     bhnext != NULL;
	     bh = bhnext, bhnext = bh->b_reqnext) {
		if (use_clustering) {
			/*
			 * See if we can do this without creating another
			 * scatter-gather segment.  In the event that this is a
			 * DMA capable host, make sure that a segment doesn't span
			 * the DMA threshold boundary.
			 */
			if (dma_host &&
			    virt_to_phys(bhnext->b_data) - 1 == ISA_DMA_THRESHOLD) {
				ret++;
				reqsize = bhnext->b_size;
			} else if (CONTIGUOUS_BUFFERS(bh, bhnext)) {
				/*
				 * This one is OK.  Let it go.
				 */
#ifdef DMA_SEGMENT_SIZE_LIMITED
				/* Note scsi_malloc is only able to hand out
				 * chunks of memory in sizes of PAGE_SIZE or
				 * less.  Thus we need to keep track of
				 * the size of the piece that we have
				 * seen so far, and if we have hit
				 * the limit of PAGE_SIZE, then we are
				 * kind of screwed and we need to start
				 * another segment.
				 */
				if( dma_host
				    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD
				    && reqsize + bhnext->b_size > PAGE_SIZE )
				{
					ret++;
					reqsize = bhnext->b_size;
					continue;
				}
#endif
				reqsize += bhnext->b_size;
				continue;
			}
			ret++;
			reqsize = bhnext->b_size;
		} else {
			ret++;
			reqsize = bhnext->b_size;
		}
	}
	if( remainder != NULL ) {
		*remainder = reqsize;
	}
	return ret;
}
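/*
 * Example use of the remainder argument (cf. __scsi_back_merge_fn below,
 * which does exactly this):
 *
 *	unsigned int segment_size = 0;
 *	int count = __count_segments(req, 1, 1, &segment_size);
 *
 * On return, segment_size holds the byte count of the final segment, so
 * the caller can check whether appending one more buffer_head would push
 * a bounced segment past PAGE_SIZE.
 */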
/*
 * Function:    recount_segments()
 *
 * Purpose:     Recount the number of scatter-gather segments for this request.
 *
 * Arguments:   req     - request that needs recounting.
 *
 * Returns:     Count of the number of SG segments for the request.
 *
 * Lock status: Irrelevant.
 *
 * Notes:       This is only used when we have partially completed requests
 *              and the bit that is leftover is of an indeterminate size.
 *              This can come up if you get a MEDIUM_ERROR, for example,
 *              as we will have "completed" all of the sectors up to and
 *              including the bad sector, and the leftover bit is what
 *              we have to do now.  This tends to be a rare occurrence, so
 *              we aren't busting our butts to instantiate separate versions
 *              of this function for the 4 different flag values.  We
 *              probably should, however.
 */
void
recount_segments(Scsi_Cmnd * SCpnt)
{
	struct request *req;
	struct Scsi_Host *SHpnt;
	Scsi_Device *SDpnt;

	req   = &SCpnt->request;
	SHpnt = SCpnt->host;
	SDpnt = SCpnt->device;

	req->nr_segments = __count_segments(req,
					    CLUSTERABLE_DEVICE(SHpnt, SDpnt),
					    SHpnt->unchecked_isa_dma, NULL);
}
#define MERGEABLE_BUFFERS(X,Y) \
(((((long)(X)->b_data+(X)->b_size)|((long)(Y)->b_data)) & \
  (DMA_CHUNK_SIZE - 1)) == 0)
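/*
 * Worked example (illustrative; assumes DMA_CHUNK_SIZE == 0x1000): if
 * buffer X has b_data == 0x10e00 and b_size == 0x200, it ends at 0x11000;
 * if buffer Y has b_data == 0x11000, then
 * (0x11000 | 0x11000) & 0xfff == 0, so MERGEABLE_BUFFERS(X,Y) is true:
 * both the end of X and the start of Y sit on chunk boundaries, and
 * dynamic DMA mapping (pci_map_sg) can fuse them into one hardware
 * sg entry.
 */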
#ifdef DMA_CHUNK_SIZE
static inline int scsi_new_mergeable(request_queue_t * q,
				     struct request * req,
				     struct Scsi_Host *SHpnt,
				     int max_segments)
{
	/*
	 * pci_map_sg will be able to merge these two
	 * into a single hardware sg entry, check if
	 * we'll have enough memory for the sg list.
	 * scsi.c allocates for this purpose
	 * min(64,sg_tablesize) entries.
	 */
	if (req->nr_segments >= max_segments ||
	    req->nr_segments >= SHpnt->sg_tablesize)
		return 0;
	req->nr_segments++;
	return 1;
}
static inline int scsi_new_segment(request_queue_t * q,
				   struct request * req,
				   struct Scsi_Host *SHpnt,
				   int max_segments)
{
	/*
	 * pci_map_sg won't be able to map these two
	 * into a single hardware sg entry, so we have to
	 * check if things fit into sg_tablesize.
	 */
	if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
	    req->nr_segments >= SHpnt->sg_tablesize)
		return 0;
	req->nr_hw_segments++;
	req->nr_segments++;
	return 1;
}
#else
static inline int scsi_new_segment(request_queue_t * q,
				   struct request * req,
				   struct Scsi_Host *SHpnt,
				   int max_segments)
{
	if (req->nr_segments < SHpnt->sg_tablesize &&
	    req->nr_segments < max_segments) {
		/*
		 * This will form the start of a new segment.  Bump the
		 * counter.
		 */
		req->nr_segments++;
		return 1;
	} else {
		return 0;
	}
}
#endif
/*
 * Function:    __scsi_merge_fn()
 *
 * Purpose:     Prototype for queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              bh      - Block which we may wish to merge into request
 *              use_clustering - 1 if this host wishes to use clustering
 *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
 *                      expose all of the address lines, so that DMA cannot
 *                      be done from an arbitrary address).
 *
 * Returns:     1 if it is OK to merge the block into the request.  0
 *              if it is not OK.
 *
 * Lock status: io_request_lock is assumed to be held here.
 *
 * Notes:       Some drivers have limited scatter-gather table sizes, and
 *              thus they cannot queue an infinitely large command.  This
 *              function is called from ll_rw_blk before it attempts to merge
 *              a new block into a request to make sure that the request will
 *              not become too large.
 *
 *              This function is not designed to be directly called.  Instead
 *              it should be referenced from other functions where the
 *              use_clustering and dma_host parameters should be integer
 *              constants.  The compiler should thus be able to properly
 *              optimize the code, eliminating stuff that is irrelevant.
 *              It is more maintainable to do this way with a single function
 *              than to have 4 separate functions all doing roughly the
 *              same thing.
 */
__inline static int __scsi_back_merge_fn(request_queue_t * q,
					 struct request * req,
					 struct buffer_head *bh,
					 int max_segments,
					 int use_clustering,
					 int dma_host)
{
	unsigned int count;
	unsigned int segment_size = 0;
	Scsi_Device *SDpnt;
	struct Scsi_Host *SHpnt;

	SDpnt = (Scsi_Device *) q->queuedata;
	SHpnt = SDpnt->host;

#ifdef DMA_CHUNK_SIZE
	if (max_segments > 64)
		max_segments = 64;
#endif

	if ((req->nr_sectors + (bh->b_size >> 9)) > SHpnt->max_sectors)
		return 0;

	if (use_clustering) {
		/*
		 * See if we can do this without creating another
		 * scatter-gather segment.  In the event that this is a
		 * DMA capable host, make sure that a segment doesn't span
		 * the DMA threshold boundary.
		 */
		if (dma_host &&
		    virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
			goto new_end_segment;
		}
		if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
#ifdef DMA_SEGMENT_SIZE_LIMITED
			if( dma_host
			    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
				segment_size = 0;
				count = __count_segments(req, use_clustering, dma_host, &segment_size);
				if( segment_size + bh->b_size > PAGE_SIZE ) {
					goto new_end_segment;
				}
			}
#endif
			/*
			 * This one is OK.  Let it go.
			 */
			return 1;
		}
	}
 new_end_segment:
#ifdef DMA_CHUNK_SIZE
	if (MERGEABLE_BUFFERS(req->bhtail, bh))
		return scsi_new_mergeable(q, req, SHpnt, max_segments);
#endif
	return scsi_new_segment(q, req, SHpnt, max_segments);
}
__inline static int __scsi_front_merge_fn(request_queue_t * q,
					  struct request * req,
					  struct buffer_head *bh,
					  int max_segments,
					  int use_clustering,
					  int dma_host)
{
	unsigned int count;
	unsigned int segment_size = 0;
	Scsi_Device *SDpnt;
	struct Scsi_Host *SHpnt;

	SDpnt = (Scsi_Device *) q->queuedata;
	SHpnt = SDpnt->host;

#ifdef DMA_CHUNK_SIZE
	if (max_segments > 64)
		max_segments = 64;
#endif

	if ((req->nr_sectors + (bh->b_size >> 9)) > SHpnt->max_sectors)
		return 0;

	if (use_clustering) {
		/*
		 * See if we can do this without creating another
		 * scatter-gather segment.  In the event that this is a
		 * DMA capable host, make sure that a segment doesn't span
		 * the DMA threshold boundary.
		 */
		if (dma_host &&
		    virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
			goto new_start_segment;
		}
		if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
#ifdef DMA_SEGMENT_SIZE_LIMITED
			if( dma_host
			    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
				segment_size = bh->b_size;
				count = __count_segments(req, use_clustering, dma_host, &segment_size);
				if( count != req->nr_segments ) {
					goto new_start_segment;
				}
			}
#endif
			/*
			 * This one is OK.  Let it go.
			 */
			return 1;
		}
	}
 new_start_segment:
#ifdef DMA_CHUNK_SIZE
	if (MERGEABLE_BUFFERS(bh, req->bh))
		return scsi_new_mergeable(q, req, SHpnt, max_segments);
#endif
	return scsi_new_segment(q, req, SHpnt, max_segments);
}
/*
 * Function:    scsi_merge_fn_()
 *
 * Purpose:     queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              bh      - Block which we may wish to merge into request
 *
 * Returns:     1 if it is OK to merge the block into the request.  0
 *              if it is not OK.
 *
 * Lock status: io_request_lock is assumed to be held here.
 *
 * Notes:       Optimized for different cases depending upon whether
 *              ISA DMA is in use and whether clustering should be used.
 */
#define MERGEFCT(_FUNCTION, _BACK_FRONT, _CLUSTER, _DMA)	\
static int _FUNCTION(request_queue_t * q,			\
		     struct request * req,			\
		     struct buffer_head * bh,			\
		     int max_segments)				\
{								\
    int ret;							\
    SANITY_CHECK(req, _CLUSTER, _DMA);				\
    ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q,		\
					      req,		\
					      bh,		\
					      max_segments,	\
					      _CLUSTER,		\
					      _DMA);		\
    return ret;							\
}

/* Version with use_clustering 0 and dma_host 1 is not necessary,
 * since the only use of dma_host above is protected by use_clustering.
 */
MERGEFCT(scsi_back_merge_fn_, back, 0, 0)
MERGEFCT(scsi_back_merge_fn_c, back, 1, 0)
MERGEFCT(scsi_back_merge_fn_dc, back, 1, 1)

MERGEFCT(scsi_front_merge_fn_, front, 0, 0)
MERGEFCT(scsi_front_merge_fn_c, front, 1, 0)
MERGEFCT(scsi_front_merge_fn_dc, front, 1, 1)
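/*
 * Hand-expanded for reference, MERGEFCT(scsi_back_merge_fn_c, back, 1, 0)
 * generates:
 *
 *	static int scsi_back_merge_fn_c(request_queue_t * q,
 *					struct request * req,
 *					struct buffer_head * bh,
 *					int max_segments)
 *	{
 *		int ret;
 *		SANITY_CHECK(req, 1, 0);
 *		ret = __scsi_back_merge_fn(q, req, bh, max_segments, 1, 0);
 *		return ret;
 *	}
 *
 * i.e. a stub in which use_clustering and dma_host are compile-time
 * constants, so the compiler can drop the branches of __scsi_back_merge_fn
 * that are irrelevant for this host type.
 */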
/*
 * Function:    __scsi_merge_requests_fn()
 *
 * Purpose:     Prototype for queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              next    - 2nd request that we might want to combine with req
 *              use_clustering - 1 if this host wishes to use clustering
 *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
 *                      expose all of the address lines, so that DMA cannot
 *                      be done from an arbitrary address).
 *
 * Returns:     1 if it is OK to merge the two requests.  0
 *              if it is not OK.
 *
 * Lock status: io_request_lock is assumed to be held here.
 *
 * Notes:       Some drivers have limited scatter-gather table sizes, and
 *              thus they cannot queue an infinitely large command.  This
 *              function is called from ll_rw_blk before it attempts to merge
 *              a new block into a request to make sure that the request will
 *              not become too large.
 *
 *              This function is not designed to be directly called.  Instead
 *              it should be referenced from other functions where the
 *              use_clustering and dma_host parameters should be integer
 *              constants.  The compiler should thus be able to properly
 *              optimize the code, eliminating stuff that is irrelevant.
 *              It is more maintainable to do this way with a single function
 *              than to have 4 separate functions all doing roughly the
 *              same thing.
 */
__inline static int __scsi_merge_requests_fn(request_queue_t * q,
					     struct request *req,
					     struct request *next,
					     int max_segments,
					     int use_clustering,
					     int dma_host)
{
	Scsi_Device *SDpnt;
	struct Scsi_Host *SHpnt;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	SDpnt = (Scsi_Device *) q->queuedata;
	SHpnt = SDpnt->host;

#ifdef DMA_CHUNK_SIZE
	if (max_segments > 64)
		max_segments = 64;

	/* If it would not fit into prepared memory space for sg chain,
	 * then don't allow the merge.
	 */
	if (req->nr_segments + next->nr_segments - 1 > max_segments ||
	    req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) {
		return 0;
	}
	if (req->nr_hw_segments + next->nr_hw_segments - 1 > SHpnt->sg_tablesize) {
		return 0;
	}
#else
	/*
	 * If the two requests together are too large (even assuming that we
	 * can merge the boundary requests into one segment), then don't
	 * allow the merge.
	 */
	if (req->nr_segments + next->nr_segments - 1 > SHpnt->sg_tablesize) {
		return 0;
	}
#endif

	if ((req->nr_sectors + next->nr_sectors) > SHpnt->max_sectors)
		return 0;

	/*
	 * The main question is whether the two segments at the boundaries
	 * would be considered one or two.
	 */
	if (use_clustering) {
		/*
		 * See if we can do this without creating another
		 * scatter-gather segment.  In the event that this is a
		 * DMA capable host, make sure that a segment doesn't span
		 * the DMA threshold boundary.
		 */
		if (dma_host &&
		    virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
			goto dont_combine;
		}
#ifdef DMA_SEGMENT_SIZE_LIMITED
		/*
		 * We currently can only allocate scatter-gather bounce
		 * buffers in chunks of PAGE_SIZE or less.
		 */
		if (dma_host
		    && CONTIGUOUS_BUFFERS(req->bhtail, next->bh)
		    && virt_to_phys(req->bhtail->b_data) - 1 >= ISA_DMA_THRESHOLD )
		{
			int segment_size = 0;
			int count = 0;

			count = __count_segments(req, use_clustering, dma_host, &segment_size);
			count += __count_segments(next, use_clustering, dma_host, &segment_size);
			if( count != req->nr_segments + next->nr_segments ) {
				goto dont_combine;
			}
		}
#endif
		if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
			/*
			 * This one is OK.  Let it go.
			 */
			req->nr_segments += next->nr_segments - 1;
#ifdef DMA_CHUNK_SIZE
			req->nr_hw_segments += next->nr_hw_segments - 1;
#endif
			return 1;
		}
	}
      dont_combine:
#ifdef DMA_CHUNK_SIZE
	if (req->nr_segments + next->nr_segments > max_segments ||
	    req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
		return 0;
	}
	/* If dynamic DMA mapping can merge last segment in req with
	 * first segment in next, then the check for hw segments was
	 * done above already, so we can always merge.
	 */
	if (MERGEABLE_BUFFERS (req->bhtail, next->bh)) {
		req->nr_hw_segments += next->nr_hw_segments - 1;
	} else if (req->nr_hw_segments + next->nr_hw_segments > SHpnt->sg_tablesize) {
		return 0;
	} else {
		req->nr_hw_segments += next->nr_hw_segments;
	}
	req->nr_segments += next->nr_segments;
	return 1;
#else
	/*
	 * We know that the two requests at the boundary should not be combined.
	 * Make sure we can fit something that is the sum of the two.
	 * A slightly stricter test than we had above.
	 */
	if (req->nr_segments + next->nr_segments > max_segments ||
	    req->nr_segments + next->nr_segments > SHpnt->sg_tablesize) {
		return 0;
	} else {
		/*
		 * This will form the start of a new segment.  Bump the
		 * counter.
		 */
		req->nr_segments += next->nr_segments;
		return 1;
	}
#endif
}
/*
 * Function:    scsi_merge_requests_fn_()
 *
 * Purpose:     queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              next    - second request that we may wish to merge into req
 *
 * Returns:     1 if it is OK to merge the two requests.  0
 *              if it is not OK.
 *
 * Lock status: io_request_lock is assumed to be held here.
 *
 * Notes:       Optimized for different cases depending upon whether
 *              ISA DMA is in use and whether clustering should be used.
 */
#define MERGEREQFCT(_FUNCTION, _CLUSTER, _DMA)		\
static int _FUNCTION(request_queue_t * q,		\
		     struct request * req,		\
		     struct request * next,		\
		     int max_segments)			\
{							\
    int ret;						\
    SANITY_CHECK(req, _CLUSTER, _DMA);			\
    ret = __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
    return ret;						\
}

/* Version with use_clustering 0 and dma_host 1 is not necessary,
 * since the only use of dma_host above is protected by use_clustering.
 */
MERGEREQFCT(scsi_merge_requests_fn_, 0, 0)
MERGEREQFCT(scsi_merge_requests_fn_c, 1, 0)
MERGEREQFCT(scsi_merge_requests_fn_dc, 1, 1)
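/*
 * As with MERGEFCT above, each MERGEREQFCT line stamps out a constant-flag
 * stub; e.g. MERGEREQFCT(scsi_merge_requests_fn_dc, 1, 1) yields a
 * scsi_merge_requests_fn_dc() that simply calls
 * __scsi_merge_requests_fn(q, req, next, max_segments, 1, 1).
 */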
/*
 * Function:    __init_io()
 *
 * Purpose:     Prototype for io initialize function.
 *
 * Arguments:   SCpnt   - Command descriptor we wish to initialize
 *              sg_count_valid  - 1 if the sg count in the req is valid.
 *              use_clustering - 1 if this host wishes to use clustering
 *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
 *                      expose all of the address lines, so that DMA cannot
 *                      be done from an arbitrary address).
 *
 * Returns:     1 on success.
 *
 * Notes:       Only the SCpnt argument should be a non-constant variable.
 *              This function is designed in such a way that it will be
 *              invoked from a series of small stubs, each of which would
 *              be optimized for specific circumstances.
 *
 *              The advantage of this is that hosts that don't do DMA
 *              get versions of the function that essentially don't have
 *              any of the DMA code.  Same goes for clustering - in the
 *              case of hosts with no need for clustering, there is no point
 *              in a whole bunch of overhead.
 *
 *              Finally, in the event that a host has set can_queue to SG_ALL
 *              implying that there is no limit to the length of a scatter
 *              gather list, the sg count in the request won't be valid
 *              (mainly because we don't need queue management functions
 *              which keep the tally up to date).
 */
__inline static int __init_io(Scsi_Cmnd * SCpnt,
			      int sg_count_valid,
			      int use_clustering,
			      int dma_host)
{
	struct buffer_head * bh;
	struct buffer_head * bhprev;
	char		   * buff;
	int		     count;
	int		     i;
	struct request     * req;
	int		     sectors;
	struct scatterlist * sgpnt;
	int		     this_count;
	void		  ** bbpnt;

	// printk("scsi_merge.c: __init_io entered\n");

	/*
	 * FIXME(eric) - don't inline this - it doesn't depend on the
	 * integer flags. Come to think of it, I don't think this is even
	 * needed any more.  Need to play with it and see if we hit the
	 * panic.  If not, then don't bother.
	 */
	if (!SCpnt->request.bh) {
		/*
		 * Case of page request (i.e. raw device), or unlinked buffer.
		 * Typically used for swapping, but this isn't how we do
		 * swapping any more.
		 */
		panic("I believe this is dead code.  If we hit this, I was wrong");
		SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9;
		SCpnt->request_buffer = SCpnt->request.buffer;
		SCpnt->use_sg = 0;
		/*
		 * FIXME(eric) - need to handle DMA here.
		 */
		return 1;
	}
	req = &SCpnt->request;
	/*
	 * First we need to know how many scatter gather segments are needed.
	 */
	if (!sg_count_valid) {
		count = __count_segments(req, use_clustering, dma_host, NULL);
	} else {
		count = req->nr_segments;
	}

	/*
	 * If the dma pool is nearly empty, then queue a minimal request
	 * with a single segment.  Typically this will satisfy a single
	 * buffer.
	 */
	if (dma_host && scsi_dma_free_sectors <= 10) {
		this_count = SCpnt->request.current_nr_sectors;
		goto single_segment;
	}

	/*
	 * Don't bother with scatter-gather if there is only one segment.
	 */
	if (count == 1) {
		this_count = SCpnt->request.nr_sectors;
		goto single_segment;
	}
	SCpnt->use_sg = count;

	/*
	 * Allocate the actual scatter-gather table itself.
	 */
	SCpnt->sglist_len = (SCpnt->use_sg * sizeof(struct scatterlist));

	/* If we could potentially require ISA bounce buffers, allocate
	 * space for this array here.
	 */
	if (dma_host)
		SCpnt->sglist_len += (SCpnt->use_sg * sizeof(void *));

	/* scsi_malloc can only allocate in chunks of 512 bytes so
	 * round it up.
	 */
	SCpnt->sglist_len = (SCpnt->sglist_len + 511) & ~511;

	sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
	/*
	 * Now fill the scatter-gather table.
	 */
	if (!sgpnt) {
		/*
		 * If we cannot allocate the scatter-gather table, then
		 * simply write the first buffer all by itself.
		 */
		printk("Warning - running *really* short on DMA buffers\n");
		this_count = SCpnt->request.current_nr_sectors;
		goto single_segment;
	}

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	memset(sgpnt, 0, SCpnt->sglist_len);
	SCpnt->request_buffer = (char *) sgpnt;
	SCpnt->request_bufflen = 0;
	bhprev = NULL;

	if (dma_host)
		bbpnt = (void **) ((char *)sgpnt +
				   (SCpnt->use_sg * sizeof(struct scatterlist)));
	else
		bbpnt = NULL;

	SCpnt->bounce_buffers = bbpnt;

	for (count = 0, bh = SCpnt->request.bh;
	     bh; bh = bh->b_reqnext) {
		if (use_clustering && bhprev != NULL) {
			if (dma_host &&
			    virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) {
				/* Nothing - fall through */
			} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
				/*
				 * This one is OK.  Let it go.  Note that we
				 * do not have the ability to allocate
				 * bounce buffer segments > PAGE_SIZE, so
				 * for now we limit the thing.
				 */
				if( dma_host ) {
#ifdef DMA_SEGMENT_SIZE_LIMITED
					if( virt_to_phys(bh->b_data) - 1 < ISA_DMA_THRESHOLD
					    || sgpnt[count - 1].length + bh->b_size <= PAGE_SIZE ) {
						sgpnt[count - 1].length += bh->b_size;
						bhprev = bh;
						continue;
					}
#else
					sgpnt[count - 1].length += bh->b_size;
					bhprev = bh;
					continue;
#endif
				} else {
					sgpnt[count - 1].length += bh->b_size;
					SCpnt->request_bufflen += bh->b_size;
					bhprev = bh;
					continue;
				}
			}
		}
		count++;
		sgpnt[count - 1].address = bh->b_data;
		sgpnt[count - 1].page = NULL;
		sgpnt[count - 1].length += bh->b_size;
		if (!dma_host) {
			SCpnt->request_bufflen += bh->b_size;
		}
		bhprev = bh;
	}

	/*
	 * Verify that the count is correct.
	 */
	if (count != SCpnt->use_sg) {
		printk("Incorrect number of segments after building list\n");
#ifdef CONFIG_SCSI_DEBUG_QUEUES
		dump_stats(req, use_clustering, dma_host, count);
#endif
	}
	if (!dma_host) {
		return 1;
	}

	/*
	 * Now allocate bounce buffers, if needed.
	 */
	SCpnt->request_bufflen = 0;
	for (i = 0; i < count; i++) {
		sectors = (sgpnt[i].length >> 9);
		SCpnt->request_bufflen += sgpnt[i].length;
		if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
		    ISA_DMA_THRESHOLD) {
			if( scsi_dma_free_sectors - sectors <= 10 ) {
				/*
				 * If this would nearly drain the DMA
				 * pool empty, then let's stop here.
				 * Don't make this request any larger.
				 * This is kind of a safety valve that
				 * we use - we could get screwed later
				 * on if we run out completely.
				 */
				SCpnt->request_bufflen -= sgpnt[i].length;
				SCpnt->use_sg = i;
				if (i == 0) {
					goto big_trouble;
				}
				break;
			}

			bbpnt[i] = sgpnt[i].address;
			sgpnt[i].address =
			    (char *) scsi_malloc(sgpnt[i].length);
			/*
			 * If we cannot allocate memory for this DMA bounce
			 * buffer, then queue just what we have done so far.
			 */
			if (sgpnt[i].address == NULL) {
				printk("Warning - running low on DMA memory\n");
				SCpnt->request_bufflen -= sgpnt[i].length;
				SCpnt->use_sg = i;
				if (i == 0) {
					goto big_trouble;
				}
				break;
			}
			if (SCpnt->request.cmd == WRITE) {
				memcpy(sgpnt[i].address, bbpnt[i],
				       sgpnt[i].length);
			}
		}
	}
	return 1;
      big_trouble:
	/*
	 * We come here in the event that we get one humongous
	 * request, where we need a bounce buffer, and the buffer is
	 * more than we can allocate in a single call to
	 * scsi_malloc().  In addition, we only come here when it is
	 * the 0th element of the scatter-gather table that gets us
	 * into this trouble.  As a fallback, we fall back to
	 * non-scatter-gather, and ask for a single segment.  We make
	 * a half-hearted attempt to pick a reasonably large request
	 * size mainly so that we don't thrash the thing with
	 * iddy-biddy requests.
	 */

	/*
	 * The original number of sectors in the 0th element of the
	 * scatter-gather table.
	 */
	sectors = sgpnt[0].length >> 9;

	/*
	 * Free up the original scatter-gather table.  Note that since
	 * it was the 0th element that got us here, we don't have to
	 * go in and free up memory from the other slots.
	 */
	SCpnt->request_bufflen = 0;
	scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
	SCpnt->request_buffer = NULL;
	SCpnt->bounce_buffers = NULL;
	SCpnt->sglist_len = 0;
	SCpnt->use_sg = 0;
	/*
	 * Make an attempt to pick up as much as we reasonably can.
	 * Just keep adding sectors until the pool starts running kind of
	 * low.  The limit of 30 is somewhat arbitrary - the point is that
	 * it would kind of suck if we dropped down and limited ourselves to
	 * single-block requests if we had hundreds of free sectors.
	 */
	if( scsi_dma_free_sectors > 30 ) {
		for (this_count = 0, bh = SCpnt->request.bh;
		     bh; bh = bh->b_reqnext) {
			if( scsi_dma_free_sectors - this_count < 30
			    || this_count == sectors )
			{
				break;
			}
			this_count += bh->b_size >> 9;
		}
	} else {
		/*
		 * Yow!  Take the absolute minimum here.
		 */
		this_count = SCpnt->request.current_nr_sectors;
	}

	/*
	 * Now drop through into the single-segment case.
	 */
      single_segment:
	/*
	 * Come here if for any reason we choose to do this as a single
	 * segment.  Possibly the entire request, or possibly a small
	 * chunk of the entire request.
	 */
	bh = SCpnt->request.bh;
	buff = SCpnt->request.buffer;

	if (dma_host) {
		/*
		 * Allocate a DMA bounce buffer.  If the allocation fails, fall
		 * back and allocate a really small one - enough to satisfy
		 * the first buffer.
		 */
		if (virt_to_phys(SCpnt->request.bh->b_data)
		    + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) {
			buff = (char *) scsi_malloc(this_count << 9);
			if (!buff) {
				printk("Warning - running low on DMA memory\n");
				this_count = SCpnt->request.current_nr_sectors;
				buff = (char *) scsi_malloc(this_count << 9);
				if (!buff) {
					dma_exhausted(SCpnt, 0);
				}
			}
			if (SCpnt->request.cmd == WRITE)
				memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
		}
	}
	SCpnt->request_bufflen = this_count << 9;
	SCpnt->request_buffer = buff;
	SCpnt->use_sg = 0;
	return 1;
}
#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA)	\
static int _FUNCTION(Scsi_Cmnd * SCpnt)		\
{							\
    return __init_io(SCpnt, _VALID, _CLUSTER, _DMA);	\
}

/*
 * ll_rw_blk.c now keeps track of the number of segments in
 * a request.  Thus we don't have to do it any more here.
 * We always force "_VALID" to 1.  Eventually clean this up
 * and get rid of the extra argument.
 */
INITIO(scsi_init_io_v, 1, 0, 0)
INITIO(scsi_init_io_vd, 1, 0, 1)
INITIO(scsi_init_io_vc, 1, 1, 0)
INITIO(scsi_init_io_vdc, 1, 1, 1)
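/*
 * For example, INITIO(scsi_init_io_vdc, 1, 1, 1) expands to:
 *
 *	static int scsi_init_io_vdc(Scsi_Cmnd * SCpnt)
 *	{
 *	    return __init_io(SCpnt, 1, 1, 1);
 *	}
 *
 * giving initialize_merge_fn() below a ready-made entry point for
 * clustering hosts with ISA DMA restrictions.
 */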
/*
 * Function:    initialize_merge_fn()
 *
 * Purpose:     Initialize merge function for a host
 *
 * Arguments:   SDpnt   - Device for which the queue is being initialized.
 *
 * Returns:     Nothing.
 */
void initialize_merge_fn(Scsi_Device * SDpnt)
{
	request_queue_t *q;
	struct Scsi_Host *SHpnt;

	SHpnt = SDpnt->host;
	q = &SDpnt->request_queue;

	/*
	 * If the host has already selected a merge manager, then don't
	 * pick a new one.
	 */
	if (q->back_merge_fn && q->front_merge_fn)
		return;

	/*
	 * If this host has an unlimited tablesize, then don't bother with a
	 * merge manager.  The whole point of the operation is to make sure
	 * that requests don't grow too large, and this host isn't picky.
	 *
	 * Note that ll_rw_blk.c is effectively maintaining a segment
	 * count which is only valid if clustering is used, and it obviously
	 * doesn't handle the DMA case.  In the end, it
	 * is simply easier to do it ourselves with our own functions
	 * rather than rely upon the default behavior of ll_rw_blk.
	 */
	if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
		q->back_merge_fn = scsi_back_merge_fn_;
		q->front_merge_fn = scsi_front_merge_fn_;
		q->merge_requests_fn = scsi_merge_requests_fn_;
		SDpnt->scsi_init_io_fn = scsi_init_io_v;
	} else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
		q->back_merge_fn = scsi_back_merge_fn_;
		q->front_merge_fn = scsi_front_merge_fn_;
		q->merge_requests_fn = scsi_merge_requests_fn_;
		SDpnt->scsi_init_io_fn = scsi_init_io_vd;
	} else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
		q->back_merge_fn = scsi_back_merge_fn_c;
		q->front_merge_fn = scsi_front_merge_fn_c;
		q->merge_requests_fn = scsi_merge_requests_fn_c;
		SDpnt->scsi_init_io_fn = scsi_init_io_vc;
	} else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
		q->back_merge_fn = scsi_back_merge_fn_dc;
		q->front_merge_fn = scsi_front_merge_fn_dc;
		q->merge_requests_fn = scsi_merge_requests_fn_dc;
		SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
	}
}
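/*
 * Usage sketch (illustrative): the SCSI mid-layer calls
 * initialize_merge_fn(SDpnt) once per device, after which ll_rw_blk
 * invokes the selected hooks, e.g.
 *
 *	q->back_merge_fn(q, req, bh, max_segments);
 *	q->merge_requests_fn(q, req, next, max_segments);
 *
 * and command setup calls SDpnt->scsi_init_io_fn(SCpnt) to build the
 * scatter-gather list via __init_io().
 */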