/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 */
24 #include <linux/blkdev.h>
25 #include <linux/elevator.h>
26 #include <linux/blk.h>
27 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 * This is a bit tricky. It's given that bh and rq are for the same
32 * device, but the next request might of course not be. Run through
33 * the tests below to check if we want to insert here if we can't merge
34 * bh into an existing request
36 inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
37 struct list_head *head)
39 struct list_head *next;
40 struct request *next_rq;
42 next = rq->queue.next;
47 * if the device is different (usually on a different partition),
48 * just check if bh is after rq
50 next_rq = blkdev_entry_to_request(next);
51 if (next_rq->rq_dev != rq->rq_dev)
52 return bh->b_rsector > rq->sector;
55 * ok, rq, next_rq and bh are on the same device. if bh is in between
56 * the two, this is the sweet spot
58 if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
62 * next_rq is ordered wrt rq, but bh is not in between the two
64 if (next_rq->sector > rq->sector)
68 * next_rq and rq not ordered, if we happen to be either before
69 * next_rq or after rq insert here anyway
71 if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
78 int elevator_linus_merge(request_queue_t *q, struct request **req,
79 struct list_head * head,
80 struct buffer_head *bh, int rw,
83 struct list_head *entry = &q->queue_head;
84 unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
87 while ((entry = entry->prev) != head) {
88 __rq = blkdev_entry_to_request(entry);
91 * we can't insert beyond a zero sequence point
93 if (__rq->elevator_sequence <= 0)
98 if (__rq->rq_dev != bh->b_rdev)
100 if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head))
104 if (__rq->nr_sectors + count > max_sectors)
106 if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
107 ret = ELEVATOR_BACK_MERGE;
110 } else if (__rq->sector - count == bh->b_rsector) {
111 ret = ELEVATOR_FRONT_MERGE;
112 __rq->elevator_sequence--;
119 * account merge (ret != 0, cost is 1) or seeky insert (*req is set,
120 * cost is ELV_LINUS_SEEK_COST
123 int scan_cost = ret ? 1 : ELV_LINUS_SEEK_COST;
124 struct list_head *entry = &(*req)->queue;
126 while ((entry = entry->next) != &q->queue_head) {
127 __rq = blkdev_entry_to_request(entry);
128 __rq->elevator_sequence -= scan_cost;
135 void elevator_linus_merge_req(struct request *req, struct request *next)
137 if (next->elevator_sequence < req->elevator_sequence)
138 req->elevator_sequence = next->elevator_sequence;
142 * See if we can find a request that this buffer can be coalesced with.
144 int elevator_noop_merge(request_queue_t *q, struct request **req,
145 struct list_head * head,
146 struct buffer_head *bh, int rw,
149 struct list_head *entry;
150 unsigned int count = bh->b_size >> 9;
152 if (list_empty(&q->queue_head))
153 return ELEVATOR_NO_MERGE;
155 entry = &q->queue_head;
156 while ((entry = entry->prev) != head) {
157 struct request *__rq = blkdev_entry_to_request(entry);
161 if (__rq->rq_dev != bh->b_rdev)
163 if (__rq->nr_sectors + count > max_sectors)
167 if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
169 return ELEVATOR_BACK_MERGE;
170 } else if (__rq->sector - count == bh->b_rsector) {
172 return ELEVATOR_FRONT_MERGE;
176 *req = blkdev_entry_to_request(q->queue_head.prev);
177 return ELEVATOR_NO_MERGE;
/*
 * The noop elevator keeps no per-request merge state, so there is
 * nothing to carry over when two requests are merged.
 */
void elevator_noop_merge_req(struct request *req, struct request *next)
{
}
182 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
184 blkelv_ioctl_arg_t output;
186 output.queue_ID = elevator->queue_ID;
187 output.read_latency = elevator->read_latency;
188 output.write_latency = elevator->write_latency;
189 output.max_bomb_segments = 0;
191 if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
197 int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
199 blkelv_ioctl_arg_t input;
201 if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
204 if (input.read_latency < 0)
206 if (input.write_latency < 0)
209 elevator->read_latency = input.read_latency;
210 elevator->write_latency = input.write_latency;
214 void elevator_init(elevator_t * elevator, elevator_t type)
216 static unsigned int queue_ID;
219 elevator->queue_ID = queue_ID++;