~ubuntu-branches/ubuntu/utopic/xen/utopic

« back to all changes in this revision

Viewing changes to xen/drivers/block/elevator.c

  • Committer: Bazaar Package Importer
  • Author(s): Bastian Blank
  • Date: 2010-05-06 15:47:38 UTC
  • mto: (1.3.1) (15.1.1 sid) (4.1.1 experimental)
  • mto: This revision was merged to the branch mainline in revision 3.
  • Revision ID: james.westby@ubuntu.com-20100506154738-agoz0rlafrh1fnq7
Tags: upstream-4.0.0
Import upstream version 4.0.0

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
/*
2
 
 *  linux/drivers/block/elevator.c
3
 
 *
4
 
 *  Block device elevator/IO-scheduler.
5
 
 *
6
 
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
7
 
 *
8
 
 * 30042000 Jens Axboe <axboe@suse.de> :
9
 
 *
10
 
 * Split the elevator a bit so that it is possible to choose a different
11
 
 * one or even write a new "plug in". There are three pieces:
12
 
 * - elevator_fn, inserts a new request in the queue list
13
 
 * - elevator_merge_fn, decides whether a new buffer can be merged with
14
 
 *   an existing request
15
 
 * - elevator_dequeue_fn, called when a request is taken off the active list
16
 
 *
17
 
 * 20082000 Dave Jones <davej@suse.de> :
18
 
 * Removed tests for max-bomb-segments, which was breaking elvtune
19
 
 *  when run without -bN
20
 
 *
21
 
 */
22
 
 
23
 
#include <xeno/config.h>
24
 
#include <xeno/types.h>
25
 
/*#include <xeno/fs.h>*/
26
 
#include <xeno/blkdev.h>
27
 
#include <xeno/elevator.h>
28
 
#include <xeno/blk.h>
29
 
#include <xeno/module.h>
30
 
#include <asm/uaccess.h>
31
 
 
32
 
/*
33
 
 * This is a bit tricky. It's given that bh and rq are for the same
34
 
 * device, but the next request might of course not be. Run through
35
 
 * the tests below to check if we want to insert here if we can't merge
36
 
 * bh into an existing request
37
 
 */
38
 
inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
39
 
                            struct list_head *head)
40
 
{
41
 
        struct list_head *next;
42
 
        struct request *next_rq;
43
 
 
44
 
        next = rq->queue.next;
45
 
        if (next == head)
46
 
                return 0;
47
 
 
48
 
        /*
49
 
         * if the device is different (usually on a different partition),
50
 
         * just check if bh is after rq
51
 
         */
52
 
        next_rq = blkdev_entry_to_request(next);
53
 
        if (next_rq->rq_dev != rq->rq_dev)
54
 
                return bh->b_rsector > rq->sector;
55
 
 
56
 
        /*
57
 
         * ok, rq, next_rq and bh are on the same device. if bh is in between
58
 
         * the two, this is the sweet spot
59
 
         */
60
 
        if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
61
 
                return 1;
62
 
 
63
 
        /*
64
 
         * next_rq is ordered wrt rq, but bh is not in between the two
65
 
         */
66
 
        if (next_rq->sector > rq->sector)
67
 
                return 0;
68
 
 
69
 
        /*
70
 
         * next_rq and rq not ordered, if we happen to be either before
71
 
         * next_rq or after rq insert here anyway
72
 
         */
73
 
        if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
74
 
                return 1;
75
 
 
76
 
        return 0;
77
 
}
78
 
 
79
 
 
80
 
/*
 * Linus-style elevator merge: walk the queue backwards looking for a
 * request this buffer can be merged into, ageing every request passed
 * over so that none is starved forever.  On a merge, *req is the merge
 * target; otherwise *req may be set to a good insertion point.
 */
int elevator_linus_merge(request_queue_t *q, struct request **req,
                         struct list_head * head,
                         struct buffer_head *bh, int rw,
                         int max_sectors)
{
        unsigned int nsect = bh->b_size >> 9;
        unsigned int result = ELEVATOR_NO_MERGE;
        struct list_head *pos;

        /* Scan from the queue tail back towards head. */
        for (pos = q->queue_head.prev; pos != head; pos = pos->prev) {
                struct request *cur = blkdev_entry_to_request(pos);

                /*
                 * Age every request we walk past; once one has run out
                 * of credit, stop so it cannot be passed over again.
                 */
                if (cur->elevator_sequence-- <= 0)
                        break;

                if (cur->waiting)
                        continue;
                if (cur->rq_dev != bh->b_rdev)
                        continue;

                /* Remember the first valid insertion point we pass. */
                if (!*req && bh_rq_in_between(bh, cur, &q->queue_head))
                        *req = cur;

                if (cur->cmd != rw)
                        continue;
                if (cur->nr_sectors + nsect > max_sectors)
                        continue;
                if (cur->elevator_sequence < nsect)
                        break;

                if (cur->sector + cur->nr_sectors == bh->b_rsector) {
                        /* bh extends cur at its tail. */
                        result = ELEVATOR_BACK_MERGE;
                        *req = cur;
                        break;
                } else if (cur->sector - nsect == bh->b_rsector) {
                        /* bh prepends to cur; charge the extra latency. */
                        result = ELEVATOR_FRONT_MERGE;
                        cur->elevator_sequence -= nsect;
                        *req = cur;
                        break;
                }
        }

        return result;
}
123
 
 
124
 
/*
 * Second pass after a merge: every request that was passed over by the
 * scan (those queued after req) loses 'count' sectors of ageing credit.
 */
void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
{
        struct list_head *head = &q->queue_head;
        struct list_head *pos;

        for (pos = req->queue.next; pos != head; pos = pos->next)
                blkdev_entry_to_request(pos)->elevator_sequence -= count;
}
136
 
 
137
 
void elevator_linus_merge_req(struct request *req, struct request *next)
138
 
{
139
 
        if (next->elevator_sequence < req->elevator_sequence)
140
 
                req->elevator_sequence = next->elevator_sequence;
141
 
}
142
 
 
143
 
/*
144
 
 * See if we can find a request that this buffer can be coalesced with.
145
 
 */
146
 
int elevator_noop_merge(request_queue_t *q, struct request **req,
147
 
                        struct list_head * head,
148
 
                        struct buffer_head *bh, int rw,
149
 
                        int max_sectors)
150
 
{
151
 
        struct list_head *entry;
152
 
        unsigned int count = bh->b_size >> 9;
153
 
 
154
 
        if (list_empty(&q->queue_head))
155
 
                return ELEVATOR_NO_MERGE;
156
 
 
157
 
        entry = &q->queue_head;
158
 
        while ((entry = entry->prev) != head) {
159
 
                struct request *__rq = blkdev_entry_to_request(entry);
160
 
 
161
 
                if (__rq->cmd != rw)
162
 
                        continue;
163
 
                if (__rq->rq_dev != bh->b_rdev)
164
 
                        continue;
165
 
                if (__rq->nr_sectors + count > max_sectors)
166
 
                        continue;
167
 
                if (__rq->waiting)
168
 
                        continue;
169
 
                if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
170
 
                        *req = __rq;
171
 
                        return ELEVATOR_BACK_MERGE;
172
 
                } else if (__rq->sector - count == bh->b_rsector) {
173
 
                        *req = __rq;
174
 
                        return ELEVATOR_FRONT_MERGE;
175
 
                }
176
 
        }
177
 
 
178
 
        *req = blkdev_entry_to_request(q->queue_head.prev);
179
 
        return ELEVATOR_NO_MERGE;
180
 
}
181
 
 
182
 
/* The noop elevator keeps no ageing state, so there is nothing to undo. */
void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count)
{
}
183
 
 
184
 
/* No per-request elevator state to reconcile when two requests merge. */
void elevator_noop_merge_req(struct request *req, struct request *next)
{
}
185
 
 
186
 
/*
 * Report the elevator's tunables to userspace.  max_bomb_segments is
 * obsolete (see the 2000-08-20 note in the file header) and is always
 * reported as zero.  Returns 0 on success or -EFAULT.
 */
int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
{
        blkelv_ioctl_arg_t out;

        out.queue_ID          = elevator->queue_ID;
        out.read_latency      = elevator->read_latency;
        out.write_latency     = elevator->write_latency;
        out.max_bomb_segments = 0;

        return copy_to_user(arg, &out, sizeof(out)) ? -EFAULT : 0;
}
200
 
 
201
 
/*
 * Update the elevator's read/write latency tunables from userspace.
 * Negative latencies are rejected.  Returns 0 on success, -EFAULT on a
 * bad user pointer, or -EINVAL on out-of-range values.
 */
int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
{
        blkelv_ioctl_arg_t in;

        if (copy_from_user(&in, arg, sizeof(in)))
                return -EFAULT;

        if (in.read_latency < 0 || in.write_latency < 0)
                return -EINVAL;

        elevator->read_latency  = in.read_latency;
        elevator->write_latency = in.write_latency;
        return 0;
}
217
 
 
218
 
/*
 * Install the chosen elevator template and tag the queue with a fresh
 * ID.  NOTE(review): next_queue_ID is a bare static counter with no
 * locking -- presumably queue creation is serialised; confirm at the
 * call sites before relying on unique IDs.
 */
void elevator_init(elevator_t * elevator, elevator_t type)
{
        static unsigned int next_queue_ID;

        *elevator = type;                     /* copy the template first */
        elevator->queue_ID = next_queue_ID++; /* then assign the ID      */
}