/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 */
#include <xeno/config.h>
#include <xeno/types.h>
/*#include <xeno/fs.h>*/
#include <xeno/blkdev.h>
#include <xeno/elevator.h>
#include <xeno/module.h>
#include <asm/uaccess.h>
33
* This is a bit tricky. It's given that bh and rq are for the same
34
* device, but the next request might of course not be. Run through
35
* the tests below to check if we want to insert here if we can't merge
36
* bh into an existing request
38
inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
39
struct list_head *head)
41
struct list_head *next;
42
struct request *next_rq;
44
next = rq->queue.next;
49
* if the device is different (usually on a different partition),
50
* just check if bh is after rq
52
next_rq = blkdev_entry_to_request(next);
53
if (next_rq->rq_dev != rq->rq_dev)
54
return bh->b_rsector > rq->sector;
57
* ok, rq, next_rq and bh are on the same device. if bh is in between
58
* the two, this is the sweet spot
60
if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
64
* next_rq is ordered wrt rq, but bh is not in between the two
66
if (next_rq->sector > rq->sector)
70
* next_rq and rq not ordered, if we happen to be either before
71
* next_rq or after rq insert here anyway
73
if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
80
int elevator_linus_merge(request_queue_t *q, struct request **req,
81
struct list_head * head,
82
struct buffer_head *bh, int rw,
85
struct list_head *entry = &q->queue_head;
86
unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
88
while ((entry = entry->prev) != head) {
89
struct request *__rq = blkdev_entry_to_request(entry);
92
* simply "aging" of requests in queue
94
if (__rq->elevator_sequence-- <= 0)
99
if (__rq->rq_dev != bh->b_rdev)
101
if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head))
105
if (__rq->nr_sectors + count > max_sectors)
107
if (__rq->elevator_sequence < count)
109
if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
110
ret = ELEVATOR_BACK_MERGE;
113
} else if (__rq->sector - count == bh->b_rsector) {
114
ret = ELEVATOR_FRONT_MERGE;
115
__rq->elevator_sequence -= count;
124
void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
126
struct list_head *entry = &req->queue, *head = &q->queue_head;
129
* second pass scan of requests that got passed over, if any
131
while ((entry = entry->next) != head) {
132
struct request *tmp = blkdev_entry_to_request(entry);
133
tmp->elevator_sequence -= count;
137
void elevator_linus_merge_req(struct request *req, struct request *next)
139
if (next->elevator_sequence < req->elevator_sequence)
140
req->elevator_sequence = next->elevator_sequence;
144
* See if we can find a request that this buffer can be coalesced with.
146
int elevator_noop_merge(request_queue_t *q, struct request **req,
147
struct list_head * head,
148
struct buffer_head *bh, int rw,
151
struct list_head *entry;
152
unsigned int count = bh->b_size >> 9;
154
if (list_empty(&q->queue_head))
155
return ELEVATOR_NO_MERGE;
157
entry = &q->queue_head;
158
while ((entry = entry->prev) != head) {
159
struct request *__rq = blkdev_entry_to_request(entry);
163
if (__rq->rq_dev != bh->b_rdev)
165
if (__rq->nr_sectors + count > max_sectors)
169
if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
171
return ELEVATOR_BACK_MERGE;
172
} else if (__rq->sector - count == bh->b_rsector) {
174
return ELEVATOR_FRONT_MERGE;
178
*req = blkdev_entry_to_request(q->queue_head.prev);
179
return ELEVATOR_NO_MERGE;
182
void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count) {}
/* no-op elevator keeps no per-request state to propagate across a merge */
void elevator_noop_merge_req(struct request *req, struct request *next) {}
186
int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
188
blkelv_ioctl_arg_t output;
190
output.queue_ID = elevator->queue_ID;
191
output.read_latency = elevator->read_latency;
192
output.write_latency = elevator->write_latency;
193
output.max_bomb_segments = 0;
195
if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
201
int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
203
blkelv_ioctl_arg_t input;
205
if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
208
if (input.read_latency < 0)
210
if (input.write_latency < 0)
213
elevator->read_latency = input.read_latency;
214
elevator->write_latency = input.write_latency;
218
void elevator_init(elevator_t * elevator, elevator_t type)
220
static unsigned int queue_ID;
223
elevator->queue_ID = queue_ID++;