/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/blk.h>
#include <linux/module.h>
#include <asm/uaccess.h>
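
/*
 * The two schedulers implemented below plug into the scheme described at
 * the top of the file: elevator_linus_merge()/elevator_linus_merge_req()
 * (sector-sorted insertion with latency accounting) and
 * elevator_noop_merge()/elevator_noop_merge_req() (merge-only, FIFO
 * insertion).  A sketch of how a queue selects one is at the end of the
 * file.
 */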
/*
 * This is a bit tricky. It's given that bh and rq are for the same
 * device, but the next request might of course not be. Run through
 * the tests below to check whether we want to insert here when we
 * can't merge bh into an existing request.
 */
inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
                            struct list_head *head)
{
        struct list_head *next;
        struct request *next_rq;

        next = rq->queue.next;
        if (next == head)
                return 0;

        /*
         * if the device is different (usually on a different partition),
         * just check if bh is after rq
         */
        next_rq = blkdev_entry_to_request(next);
        if (next_rq->rq_dev != rq->rq_dev)
                return bh->b_rsector > rq->sector;

        /*
         * ok, rq, next_rq and bh are on the same device. if bh is in between
         * the two, this is the sweet spot
         */
        if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
                return 1;

        /*
         * next_rq is ordered wrt rq, but bh is not in between the two
         */
        if (next_rq->sector > rq->sector)
                return 0;

        /*
         * next_rq and rq not ordered, if we happen to be either before
         * next_rq or after rq insert here anyway
         */
        if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
                return 1;

        return 0;
}
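
/*
 * Worked example of the above (hypothetical sector numbers, not from the
 * original source):
 *
 *   rq->sector == 100, next_rq->sector == 500, bh->b_rsector == 300
 *      -> bh falls between two ordered requests: insert here (returns 1)
 *   rq->sector == 500, next_rq->sector == 100 (not ordered), b_rsector == 700
 *      -> bh sorts after rq, so insert here anyway (returns 1)
 *   rq->sector == 100, next_rq->sector == 500, bh->b_rsector == 700
 *      -> next_rq is ordered wrt rq and bh lies past it: keep looking
 *         (returns 0)
 */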

int elevator_linus_merge(request_queue_t *q, struct request **req,
                         struct list_head * head,
                         struct buffer_head *bh, int rw,
                         int max_sectors)
{
        struct list_head *entry = &q->queue_head;
        unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
        struct request *__rq;

        while ((entry = entry->prev) != head) {
                __rq = blkdev_entry_to_request(entry);

                /*
                 * we can't insert beyond a zero sequence point
                 */
                if (__rq->elevator_sequence <= 0)
                        break;

                if (__rq->waiting)
                        continue;
                if (__rq->rq_dev != bh->b_rdev)
                        continue;
                if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head))
                        *req = __rq;
                if (__rq->cmd != rw)
                        continue;
                if (__rq->nr_sectors + count > max_sectors)
                        continue;
                if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
                        ret = ELEVATOR_BACK_MERGE;
                        *req = __rq;
                        break;
                } else if (__rq->sector - count == bh->b_rsector) {
                        ret = ELEVATOR_FRONT_MERGE;
                        __rq->elevator_sequence--;
                        *req = __rq;
                        break;
                }
        }

        /*
         * account merge (ret != 0, cost is 1) or seeky insert (*req is set,
         * cost is ELV_LINUS_SEEK_COST)
         */
        if (*req) {
                int scan_cost = ret ? 1 : ELV_LINUS_SEEK_COST;
                struct list_head *entry = &(*req)->queue;

                while ((entry = entry->next) != &q->queue_head) {
                        __rq = blkdev_entry_to_request(entry);
                        __rq->elevator_sequence -= scan_cost;
                }
        }

        return ret;
}
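
/*
 * Illustrative accounting (hypothetical numbers, not from the original
 * source): every request between the chosen insertion point and the queue
 * tail is charged scan_cost off its elevator_sequence: 1 for a merge,
 * ELV_LINUS_SEEK_COST for a seeky insert.  If a request started with
 * elevator_sequence == 128 and ELV_LINUS_SEEK_COST were 16, it could be
 * passed by 128 merges or 8 seeky inserts before its sequence reaches
 * zero and the break in the scan above stops anything from being queued
 * beyond it, bounding starvation.
 */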

/*
 * on a merge of two requests, keep the smaller elevator_sequence of the
 * pair so the merged request inherits the stricter starvation limit
 */
void elevator_linus_merge_req(struct request *req, struct request *next)
{
        if (next->elevator_sequence < req->elevator_sequence)
                req->elevator_sequence = next->elevator_sequence;
}

/*
 * See if we can find a request that this buffer can be coalesced with.
 */
int elevator_noop_merge(request_queue_t *q, struct request **req,
                        struct list_head * head,
                        struct buffer_head *bh, int rw,
                        int max_sectors)
{
        struct list_head *entry;
        unsigned int count = bh->b_size >> 9;

        if (list_empty(&q->queue_head))
                return ELEVATOR_NO_MERGE;

        entry = &q->queue_head;
        while ((entry = entry->prev) != head) {
                struct request *__rq = blkdev_entry_to_request(entry);

                if (__rq->cmd != rw)
                        continue;
                if (__rq->rq_dev != bh->b_rdev)
                        continue;
                if (__rq->nr_sectors + count > max_sectors)
                        continue;
                if (__rq->waiting)
                        continue;
                if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
                        *req = __rq;
                        return ELEVATOR_BACK_MERGE;
                } else if (__rq->sector - count == bh->b_rsector) {
                        *req = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        *req = blkdev_entry_to_request(q->queue_head.prev);
        return ELEVATOR_NO_MERGE;
}
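
/*
 * Note that noop does no latency accounting: when nothing merges, *req is
 * left pointing at the current tail, so new requests are appended in
 * arrival order.
 */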

void elevator_noop_merge_req(struct request *req, struct request *next) {}

int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
{
        blkelv_ioctl_arg_t output;

        output.queue_ID                 = elevator->queue_ID;
        output.read_latency             = elevator->read_latency;
        output.write_latency            = elevator->write_latency;
        /* max-bomb-segments support was removed (see the 20082000 note
         * above), so the field is reported as zero */
        output.max_bomb_segments        = 0;

        if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
                return -EFAULT;

        return 0;
}

int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
{
        blkelv_ioctl_arg_t input;

        if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
                return -EFAULT;

        if (input.read_latency < 0)
                return -EINVAL;
        if (input.write_latency < 0)
                return -EINVAL;

        elevator->read_latency          = input.read_latency;
        elevator->write_latency         = input.write_latency;
        return 0;
}
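
#if 0
/*
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * elvtune drives the two ioctl helpers above through the BLKELVGET and
 * BLKELVSET block-device ioctls.  The device name and latency value below
 * are made up for the example.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int tune_elevator_example(const char *dev)
{
        blkelv_ioctl_arg_t arg;
        int fd = open(dev, O_RDONLY);

        if (fd < 0)
                return -1;
        if (ioctl(fd, BLKELVGET, &arg) == 0) {  /* fetch current settings */
                arg.read_latency = 1024;        /* hypothetical new value */
                ioctl(fd, BLKELVSET, &arg);     /* push them back down */
        }
        close(fd);
        return 0;
}
#endif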

void elevator_init(elevator_t * elevator, elevator_t type)
{
        static unsigned int queue_ID;

        *elevator = type;
        elevator->queue_ID = queue_ID++;
}
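
/*
 * Usage sketch (illustrative, assuming the ELEVATOR_LINUS/ELEVATOR_NOOP
 * initializers from <linux/elevator.h>): a driver selects its scheduler
 * when it sets up the queue, e.g.
 *
 *      elevator_init(&q->elevator, ELEVATOR_LINUS);
 *
 * which copies in the whole elevator_t (merge hooks plus default
 * latencies) and hands the queue a unique queue_ID for elvtune.
 */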