cfq-iosched: rework the whole round-robin list concept
block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/hash.h>
13 #include <linux/rbtree.h>
14 #include <linux/ioprio.h>
15
16 /*
17  * tunables
18  */
19 static const int cfq_quantum = 4;               /* max queue in one round of service */
20 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
21 static const int cfq_back_max = 16 * 1024;      /* maximum backwards seek, in KiB */
22 static const int cfq_back_penalty = 2;          /* penalty of a backwards seek */
23
24 static const int cfq_slice_sync = HZ / 10;
25 static int cfq_slice_async = HZ / 25;
26 static const int cfq_slice_async_rq = 2;
27 static int cfq_slice_idle = HZ / 125;
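
/*
 * For illustration, the jiffies-based defaults above translate to roughly
 * the following wall-clock values (give or take rounding at low HZ):
 * fifo_expire async (index 0) ~250ms, sync (index 1) ~125ms; slice_sync
 * ~100ms, slice_async ~40ms, slice_async_rq 2 requests, slice_idle ~8ms.
 */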
28
29 /*
30  * grace period before allowing idle class to get disk access
31  */
32 #define CFQ_IDLE_GRACE          (HZ / 10)
33
34 /*
35  * below this threshold, we consider thinktime immediate
36  */
37 #define CFQ_MIN_TT              (2)
38
39 #define CFQ_SLICE_SCALE         (5)
40
41 #define CFQ_KEY_ASYNC           (0)
42
43 /*
44  * for the hash of cfqq inside the cfqd
45  */
46 #define CFQ_QHASH_SHIFT         6
47 #define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
48 #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
49
50 #define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
51
52 #define RQ_CIC(rq)              ((struct cfq_io_context*)(rq)->elevator_private)
53 #define RQ_CFQQ(rq)             ((rq)->elevator_private2)
54
55 static struct kmem_cache *cfq_pool;
56 static struct kmem_cache *cfq_ioc_pool;
57
58 static DEFINE_PER_CPU(unsigned long, ioc_count);
59 static struct completion *ioc_gone;
60
61 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
62 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
63 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
64
65 #define ASYNC                   (0)
66 #define SYNC                    (1)
67
68 #define cfq_cfqq_sync(cfqq)     ((cfqq)->key != CFQ_KEY_ASYNC)
69
70 #define sample_valid(samples)   ((samples) > 80)
71
72 /*
73  * Per block device queue structure
74  */
75 struct cfq_data {
76         request_queue_t *queue;
77
78         /*
79          * rr list of queues with requests and the count of them
80          */
81         struct rb_root service_tree;
82         struct list_head cur_rr;
83         struct list_head idle_rr;
84         unsigned int busy_queues;
85
86         /*
87          * cfqq lookup hash
88          */
89         struct hlist_head *cfq_hash;
90
91         int rq_in_driver;
92         int hw_tag;
93
94         /*
95          * idle window management
96          */
97         struct timer_list idle_slice_timer;
98         struct work_struct unplug_work;
99
100         struct cfq_queue *active_queue;
101         struct cfq_io_context *active_cic;
102         unsigned int dispatch_slice;
103
104         struct timer_list idle_class_timer;
105
106         sector_t last_position;
107         unsigned long last_end_request;
108
109         /*
110          * tunables, see top of file
111          */
112         unsigned int cfq_quantum;
113         unsigned int cfq_fifo_expire[2];
114         unsigned int cfq_back_penalty;
115         unsigned int cfq_back_max;
116         unsigned int cfq_slice[2];
117         unsigned int cfq_slice_async_rq;
118         unsigned int cfq_slice_idle;
119
120         struct list_head cic_list;
121
122         sector_t new_seek_mean;
123         u64 new_seek_total;
124 };
125
126 /*
127  * Per process-grouping structure
128  */
129 struct cfq_queue {
130         /* reference count */
131         atomic_t ref;
132         /* parent cfq_data */
133         struct cfq_data *cfqd;
134         /* cfqq lookup hash */
135         struct hlist_node cfq_hash;
136         /* hash key */
137         unsigned int key;
138         /* member of the rr/busy/cur/idle cfqd list */
139         struct list_head cfq_list;
140         /* service_tree member */
141         struct rb_node rb_node;
142         /* service_tree key */
143         unsigned long rb_key;
144         /* sorted list of pending requests */
145         struct rb_root sort_list;
146         /* if fifo isn't expired, next request to serve */
147         struct request *next_rq;
148         /* requests queued in sort_list */
149         int queued[2];
150         /* currently allocated requests */
151         int allocated[2];
152         /* pending metadata requests */
153         int meta_pending;
154         /* fifo list of requests in sort_list */
155         struct list_head fifo;
156
157         unsigned long slice_end;
158         long slice_resid;
159
160         /* number of requests that are on the dispatch list or inside driver */
161         int dispatched;
162
163         /* io prio of this group */
164         unsigned short ioprio, org_ioprio;
165         unsigned short ioprio_class, org_ioprio_class;
166
167         /* various state flags, see below */
168         unsigned int flags;
169
170         sector_t last_request_pos;
171 };
172
173 enum cfqq_state_flags {
174         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
175         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
176         CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
177         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
178         CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
179         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
180         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
181         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
182         CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
183         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
184 };
185
186 #define CFQ_CFQQ_FNS(name)                                              \
187 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
188 {                                                                       \
189         cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
190 }                                                                       \
191 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
192 {                                                                       \
193         cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
194 }                                                                       \
195 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
196 {                                                                       \
197         return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
198 }
199
200 CFQ_CFQQ_FNS(on_rr);
201 CFQ_CFQQ_FNS(wait_request);
202 CFQ_CFQQ_FNS(must_alloc);
203 CFQ_CFQQ_FNS(must_alloc_slice);
204 CFQ_CFQQ_FNS(must_dispatch);
205 CFQ_CFQQ_FNS(fifo_expire);
206 CFQ_CFQQ_FNS(idle_window);
207 CFQ_CFQQ_FNS(prio_changed);
208 CFQ_CFQQ_FNS(queue_new);
209 CFQ_CFQQ_FNS(slice_new);
210 #undef CFQ_CFQQ_FNS
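
/*
 * For reference, each CFQ_CFQQ_FNS(x) invocation above expands to three
 * helpers operating on bit CFQ_CFQQ_FLAG_x of cfqq->flags. For example,
 * CFQ_CFQQ_FNS(on_rr) yields:
 *
 *   cfq_mark_cfqq_on_rr(cfqq)   - set the on_rr bit
 *   cfq_clear_cfqq_on_rr(cfqq)  - clear the on_rr bit
 *   cfq_cfqq_on_rr(cfqq)        - test the on_rr bit, returning 0 or 1
 */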
211
212 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
213 static void cfq_dispatch_insert(request_queue_t *, struct request *);
214 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
215
216 /*
217  * scheduler run of queue, if there are requests pending and no one in the
218  * driver that will restart queueing
219  */
220 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
221 {
222         if (cfqd->busy_queues)
223                 kblockd_schedule_work(&cfqd->unplug_work);
224 }
225
226 static int cfq_queue_empty(request_queue_t *q)
227 {
228         struct cfq_data *cfqd = q->elevator->elevator_data;
229
230         return !cfqd->busy_queues;
231 }
232
233 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
234 {
235         /*
236          * Use the per-process queue for read requests and synchronous writes
237          */
238         if (!(rw & REQ_RW) || is_sync)
239                 return task->pid;
240
241         return CFQ_KEY_ASYNC;
242 }
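
/*
 * In other words: reads and synchronous writes are keyed by the submitting
 * task's pid, so each process gets its own sync cfqq, while ordinary async
 * writes all map to CFQ_KEY_ASYNC and end up sharing an async cfqq per
 * priority level (see cfq_find_cfq_hash/cfq_get_queue below).
 */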
243
244 /*
245  * Scale schedule slice based on io priority. Use the sync time slice only
246  * if a queue is marked sync and has sync io queued. A sync queue with async
247  * io only should not get the full sync slice length.
248  */
249 static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
250                                  unsigned short prio)
251 {
252         const int base_slice = cfqd->cfq_slice[sync];
253
254         WARN_ON(prio >= IOPRIO_BE_NR);
255
256         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
257 }
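
/*
 * A rough worked example, ignoring integer truncation: with the default
 * sync base slice of ~100ms, prio 0 gets 100 + 100/5 * 4 = ~180ms,
 * prio 4 (the default) gets ~100ms, and prio 7 gets 100 - 100/5 * 3 = ~40ms.
 */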
258
259 static inline int
260 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
261 {
262         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
263 }
264
265 static inline void
266 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
267 {
268         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
269 }
270
271 /*
272  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
273  * isn't valid until the first request from the dispatch is activated
274  * and the slice time set.
275  */
276 static inline int cfq_slice_used(struct cfq_queue *cfqq)
277 {
278         if (cfq_cfqq_slice_new(cfqq))
279                 return 0;
280         if (time_before(jiffies, cfqq->slice_end))
281                 return 0;
282
283         return 1;
284 }
285
286 /*
287  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
288  * We choose the request that is closest to the head right now. Distance
289  * behind the head is penalized and only allowed to a certain extent.
290  */
291 static struct request *
292 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
293 {
294         sector_t last, s1, s2, d1 = 0, d2 = 0;
295         unsigned long back_max;
296 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
297 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
298         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
299
300         if (rq1 == NULL || rq1 == rq2)
301                 return rq2;
302         if (rq2 == NULL)
303                 return rq1;
304
305         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
306                 return rq1;
307         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
308                 return rq2;
309         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
310                 return rq1;
311         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
312                 return rq2;
313
314         s1 = rq1->sector;
315         s2 = rq2->sector;
316
317         last = cfqd->last_position;
318
319         /*
320          * by definition, 1KiB is 2 sectors
321          */
322         back_max = cfqd->cfq_back_max * 2;
323
324         /*
325          * Strict one way elevator _except_ in the case where we allow
326          * short backward seeks which are biased as twice the cost of a
327          * similar forward seek.
328          */
329         if (s1 >= last)
330                 d1 = s1 - last;
331         else if (s1 + back_max >= last)
332                 d1 = (last - s1) * cfqd->cfq_back_penalty;
333         else
334                 wrap |= CFQ_RQ1_WRAP;
335
336         if (s2 >= last)
337                 d2 = s2 - last;
338         else if (s2 + back_max >= last)
339                 d2 = (last - s2) * cfqd->cfq_back_penalty;
340         else
341                 wrap |= CFQ_RQ2_WRAP;
342
343         /* Found required data */
344
345         /*
346          * By doing switch() on the bit mask "wrap" we avoid having to
347          * check two variables for all permutations: --> faster!
348          */
349         switch (wrap) {
350         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
351                 if (d1 < d2)
352                         return rq1;
353                 else if (d2 < d1)
354                         return rq2;
355                 else {
356                         if (s1 >= s2)
357                                 return rq1;
358                         else
359                                 return rq2;
360                 }
361
362         case CFQ_RQ2_WRAP:
363                 return rq1;
364         case CFQ_RQ1_WRAP:
365                 return rq2;
366         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
367         default:
368                 /*
369                  * Since both rqs are wrapped,
370                  * start with the one that's further behind head
371                  * (--> only *one* back seek required),
372                  * since back seek takes more time than forward.
373                  */
374                 if (s1 <= s2)
375                         return rq1;
376                 else
377                         return rq2;
378         }
379 }
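
/*
 * A small worked example with made-up sector numbers: if last_position is
 * 1000 and back_max covers both requests, a request at sector 1100 has
 * d1 = 100, while one at sector 950 has d2 = (1000 - 950) * 2 = 100 with
 * the default back_penalty of 2; on that tie the higher sector (1100) wins.
 * Move the second request to sector 980 and d2 = 40, so it wins despite
 * sitting behind the head.
 */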
380
381 /*
382  * would be nice to take fifo expire time into account as well
383  */
384 static struct request *
385 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
386                   struct request *last)
387 {
388         struct rb_node *rbnext = rb_next(&last->rb_node);
389         struct rb_node *rbprev = rb_prev(&last->rb_node);
390         struct request *next = NULL, *prev = NULL;
391
392         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
393
394         if (rbprev)
395                 prev = rb_entry_rq(rbprev);
396
397         if (rbnext)
398                 next = rb_entry_rq(rbnext);
399         else {
400                 rbnext = rb_first(&cfqq->sort_list);
401                 if (rbnext && rbnext != &last->rb_node)
402                         next = rb_entry_rq(rbnext);
403         }
404
405         return cfq_choose_req(cfqd, next, prev);
406 }
407
408 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
409                                       struct cfq_queue *cfqq)
410 {
411         /*
412          * just an approximation, should be ok.
413          */
414         return ((cfqd->busy_queues - 1) * cfq_prio_slice(cfqd, 1, 0));
415 }
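
/*
 * For illustration: with the defaults, cfq_prio_slice(cfqd, 1, 0) is the
 * largest sync slice (~180ms), so a queue being (re)added while N queues
 * are busy is keyed roughly (N - 1) * 180ms into the future, i.e. about
 * one full round of the other busy queues.
 */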
416
417 static void cfq_service_tree_add(struct cfq_data *cfqd,
418                                     struct cfq_queue *cfqq)
419 {
420         struct rb_node **p = &cfqd->service_tree.rb_node;
421         struct rb_node *parent = NULL;
422         struct cfq_queue *__cfqq;
423         unsigned long rb_key;
424
425         rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
426         rb_key += cfqq->slice_resid;
427         cfqq->slice_resid = 0;
428
429         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
430                 /*
431                  * same position, nothing more to do
432                  */
433                 if (rb_key == cfqq->rb_key)
434                         return;
435
436                 rb_erase(&cfqq->rb_node, &cfqd->service_tree);
437         }
438
439         while (*p) {
440                 parent = *p;
441                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
442
443                 if (rb_key < __cfqq->rb_key)
444                         p = &(*p)->rb_left;
445                 else
446                         p = &(*p)->rb_right;
447         }
448
449         cfqq->rb_key = rb_key;
450         rb_link_node(&cfqq->rb_node, parent, p);
451         rb_insert_color(&cfqq->rb_node, &cfqd->service_tree);
452 }
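
/*
 * Reading of the insertion above: the key is "now" plus the slice offset,
 * plus any residual slice carried over from the queue's last run, so smaller
 * keys sit further to the left and are picked first by rb_first() in
 * cfq_get_next_queue(). Equal keys go to the right, which keeps insertion
 * (round-robin) order among queues that collide on the same key.
 */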
453
454 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
455 {
456         struct cfq_data *cfqd = cfqq->cfqd;
457         struct list_head *n;
458
459         /*
460          * Resorting requires the cfqq to be on the RR list already.
461          */
462         if (!cfq_cfqq_on_rr(cfqq))
463                 return;
464
465         list_del_init(&cfqq->cfq_list);
466
467         if (cfq_class_rt(cfqq)) {
468                 /*
469                  * Add to the front of the current list, but behind other
470                  * RT queues.
471                  */
472                 n = &cfqd->cur_rr;
473                 while (n->next != &cfqd->cur_rr &&
474                        cfq_class_rt(list_entry_cfqq(n->next)))
475                         n = n->next;
476
477                 list_add(&cfqq->cfq_list, n);
478         } else if (cfq_class_idle(cfqq)) {
479                 /*
480                  * IDLE goes to the tail of the idle list
481                  */
482                 list_add_tail(&cfqq->cfq_list, &cfqd->idle_rr);
483         } else {
484                 /*
485                  * So we get here, ergo the queue is a regular best-effort queue
486                  */
487                 cfq_service_tree_add(cfqd, cfqq);
488         }
489 }
490
491 /*
492  * add to busy list of queues for service, trying to be fair in ordering
493  * the pending list according to last request service
494  */
495 static inline void
496 cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
497 {
498         BUG_ON(cfq_cfqq_on_rr(cfqq));
499         cfq_mark_cfqq_on_rr(cfqq);
500         cfqd->busy_queues++;
501
502         cfq_resort_rr_list(cfqq, 0);
503 }
504
505 static inline void
506 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
507 {
508         BUG_ON(!cfq_cfqq_on_rr(cfqq));
509         cfq_clear_cfqq_on_rr(cfqq);
510         list_del_init(&cfqq->cfq_list);
511
512         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
513                 rb_erase(&cfqq->rb_node, &cfqd->service_tree);
514                 RB_CLEAR_NODE(&cfqq->rb_node);
515         }
516
517         BUG_ON(!cfqd->busy_queues);
518         cfqd->busy_queues--;
519 }
520
521 /*
522  * rb tree support functions
523  */
524 static inline void cfq_del_rq_rb(struct request *rq)
525 {
526         struct cfq_queue *cfqq = RQ_CFQQ(rq);
527         struct cfq_data *cfqd = cfqq->cfqd;
528         const int sync = rq_is_sync(rq);
529
530         BUG_ON(!cfqq->queued[sync]);
531         cfqq->queued[sync]--;
532
533         elv_rb_del(&cfqq->sort_list, rq);
534
535         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
536                 cfq_del_cfqq_rr(cfqd, cfqq);
537 }
538
539 static void cfq_add_rq_rb(struct request *rq)
540 {
541         struct cfq_queue *cfqq = RQ_CFQQ(rq);
542         struct cfq_data *cfqd = cfqq->cfqd;
543         struct request *__alias;
544
545         cfqq->queued[rq_is_sync(rq)]++;
546
547         /*
548          * looks a little odd, but the first insert might return an alias.
549          * if that happens, put the alias on the dispatch list
550          */
551         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
552                 cfq_dispatch_insert(cfqd->queue, __alias);
553
554         if (!cfq_cfqq_on_rr(cfqq))
555                 cfq_add_cfqq_rr(cfqd, cfqq);
556
557         /*
558          * check if this request is a better next-serve candidate
559          */
560         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
561         BUG_ON(!cfqq->next_rq);
562 }
563
564 static inline void
565 cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
566 {
567         elv_rb_del(&cfqq->sort_list, rq);
568         cfqq->queued[rq_is_sync(rq)]--;
569         cfq_add_rq_rb(rq);
570 }
571
572 static struct request *
573 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
574 {
575         struct task_struct *tsk = current;
576         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
577         struct cfq_queue *cfqq;
578
579         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
580         if (cfqq) {
581                 sector_t sector = bio->bi_sector + bio_sectors(bio);
582
583                 return elv_rb_find(&cfqq->sort_list, sector);
584         }
585
586         return NULL;
587 }
588
589 static void cfq_activate_request(request_queue_t *q, struct request *rq)
590 {
591         struct cfq_data *cfqd = q->elevator->elevator_data;
592
593         cfqd->rq_in_driver++;
594
595         /*
596          * If the depth is larger than 1, it really could be queueing. But let's
597          * make the mark a little higher - idling could still be good for
598          * low queue depths, and a low queue depth could also just indicate
599          * SCSI-midlayer-like behaviour where limit+1 is often seen.
600          */
601         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
602                 cfqd->hw_tag = 1;
603
604         cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
605 }
606
607 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
608 {
609         struct cfq_data *cfqd = q->elevator->elevator_data;
610
611         WARN_ON(!cfqd->rq_in_driver);
612         cfqd->rq_in_driver--;
613 }
614
615 static void cfq_remove_request(struct request *rq)
616 {
617         struct cfq_queue *cfqq = RQ_CFQQ(rq);
618
619         if (cfqq->next_rq == rq)
620                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
621
622         list_del_init(&rq->queuelist);
623         cfq_del_rq_rb(rq);
624
625         if (rq_is_meta(rq)) {
626                 WARN_ON(!cfqq->meta_pending);
627                 cfqq->meta_pending--;
628         }
629 }
630
631 static int
632 cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
633 {
634         struct cfq_data *cfqd = q->elevator->elevator_data;
635         struct request *__rq;
636
637         __rq = cfq_find_rq_fmerge(cfqd, bio);
638         if (__rq && elv_rq_merge_ok(__rq, bio)) {
639                 *req = __rq;
640                 return ELEVATOR_FRONT_MERGE;
641         }
642
643         return ELEVATOR_NO_MERGE;
644 }
645
646 static void cfq_merged_request(request_queue_t *q, struct request *req,
647                                int type)
648 {
649         if (type == ELEVATOR_FRONT_MERGE) {
650                 struct cfq_queue *cfqq = RQ_CFQQ(req);
651
652                 cfq_reposition_rq_rb(cfqq, req);
653         }
654 }
655
656 static void
657 cfq_merged_requests(request_queue_t *q, struct request *rq,
658                     struct request *next)
659 {
660         /*
661          * reposition in fifo if next is older than rq
662          */
663         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
664             time_before(next->start_time, rq->start_time))
665                 list_move(&rq->queuelist, &next->queuelist);
666
667         cfq_remove_request(next);
668 }
669
670 static int cfq_allow_merge(request_queue_t *q, struct request *rq,
671                            struct bio *bio)
672 {
673         struct cfq_data *cfqd = q->elevator->elevator_data;
674         const int rw = bio_data_dir(bio);
675         struct cfq_queue *cfqq;
676         pid_t key;
677
678         /*
679          * Disallow merge of a sync bio into an async request.
680          */
681         if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
682                 return 0;
683
684         /*
685          * Lookup the cfqq that this bio will be queued with. Allow
686          * merge only if rq is queued there.
687          */
688         key = cfq_queue_pid(current, rw, bio_sync(bio));
689         cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
690
691         if (cfqq == RQ_CFQQ(rq))
692                 return 1;
693
694         return 0;
695 }
696
697 static inline void
698 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
699 {
700         if (cfqq) {
701                 /*
702                  * stop potential idle class queues waiting service
703                  */
704                 del_timer(&cfqd->idle_class_timer);
705
706                 cfqq->slice_end = 0;
707                 cfq_clear_cfqq_must_alloc_slice(cfqq);
708                 cfq_clear_cfqq_fifo_expire(cfqq);
709                 cfq_mark_cfqq_slice_new(cfqq);
710                 cfq_clear_cfqq_queue_new(cfqq);
711         }
712
713         cfqd->active_queue = cfqq;
714 }
715
716 /*
717  * current cfqq expired its slice (or was too idle), select new one
718  */
719 static void
720 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
721                     int preempted, int timed_out)
722 {
723         if (cfq_cfqq_wait_request(cfqq))
724                 del_timer(&cfqd->idle_slice_timer);
725
726         cfq_clear_cfqq_must_dispatch(cfqq);
727         cfq_clear_cfqq_wait_request(cfqq);
728
729         /*
730          * store what was left of this slice, if the queue idled out
731          * or was preempted
732          */
733         if (timed_out && !cfq_cfqq_slice_new(cfqq))
734                 cfqq->slice_resid = cfqq->slice_end - jiffies;
735
736         cfq_resort_rr_list(cfqq, preempted);
737
738         if (cfqq == cfqd->active_queue)
739                 cfqd->active_queue = NULL;
740
741         if (cfqd->active_cic) {
742                 put_io_context(cfqd->active_cic->ioc);
743                 cfqd->active_cic = NULL;
744         }
745
746         cfqd->dispatch_slice = 0;
747 }
748
749 static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
750                                      int timed_out)
751 {
752         struct cfq_queue *cfqq = cfqd->active_queue;
753
754         if (cfqq)
755                 __cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
756 }
757
758 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
759 {
760         struct cfq_queue *cfqq = NULL;
761
762         if (!list_empty(&cfqd->cur_rr)) {
763                 /*
764                  * if current list is non-empty, grab first entry.
765                  */
766                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
767         } else if (!RB_EMPTY_ROOT(&cfqd->service_tree)) {
768                 struct rb_node *n = rb_first(&cfqd->service_tree);
769
770                 cfqq = rb_entry(n, struct cfq_queue, rb_node);
771         } else if (!list_empty(&cfqd->idle_rr)) {
772                 /*
773                  * if we have idle queues and no rt or be queues had pending
774                  * requests, either allow immediate service if the grace period
775                  * has passed or arm the idle grace timer
776                  */
777                 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
778
779                 if (time_after_eq(jiffies, end))
780                         cfqq = list_entry_cfqq(cfqd->idle_rr.next);
781                 else
782                         mod_timer(&cfqd->idle_class_timer, end);
783         }
784
785         return cfqq;
786 }
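
/*
 * Selection order, as implemented above: cur_rr (RT queues and queues that
 * just preempted) is always drained first, then the leftmost entry of the
 * best-effort service tree, and finally idle-class queues from idle_rr, but
 * only once CFQ_IDLE_GRACE has passed since the last request completed.
 */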
787
788 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
789 {
790         struct cfq_queue *cfqq;
791
792         cfqq = cfq_get_next_queue(cfqd);
793         __cfq_set_active_queue(cfqd, cfqq);
794         return cfqq;
795 }
796
797 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
798                                           struct request *rq)
799 {
800         if (rq->sector >= cfqd->last_position)
801                 return rq->sector - cfqd->last_position;
802         else
803                 return cfqd->last_position - rq->sector;
804 }
805
806 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
807 {
808         struct cfq_io_context *cic = cfqd->active_cic;
809
810         if (!sample_valid(cic->seek_samples))
811                 return 0;
812
813         return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
814 }
815
816 static int cfq_close_cooperator(struct cfq_data *cfq_data,
817                                 struct cfq_queue *cfqq)
818 {
819         /*
820          * We should notice if some of the queues are cooperating, e.g.
821          * working closely on the same area of the disk. In that case,
822          * we can group them together and not waste time idling.
823          */
824         return 0;
825 }
826
827 #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
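
/*
 * seek_mean is tracked in 512-byte sectors, so 8 * 1024 sectors is 4MiB:
 * a context whose average seek distance exceeds that is treated as seeky
 * and gets at most the minimal CFQ_MIN_TT idle window below.
 */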
828
829 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
830 {
831         struct cfq_queue *cfqq = cfqd->active_queue;
832         struct cfq_io_context *cic;
833         unsigned long sl;
834
835         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
836         WARN_ON(cfq_cfqq_slice_new(cfqq));
837
838         /*
839          * idle is disabled, either manually or by past process history
840          */
841         if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
842                 return;
843
844         /*
845          * task has exited, don't wait
846          */
847         cic = cfqd->active_cic;
848         if (!cic || !cic->ioc->task)
849                 return;
850
851         /*
852          * See if this prio level has a good candidate
853          */
854         if (cfq_close_cooperator(cfqd, cfqq) &&
855             (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
856                 return;
857
858         cfq_mark_cfqq_must_dispatch(cfqq);
859         cfq_mark_cfqq_wait_request(cfqq);
860
861         /*
862          * we don't want to idle for seeks, but we do want to allow
863          * fair distribution of slice time for a process doing back-to-back
864          * seeks. So allow a little bit of time for it to submit a new rq
865          */
866         sl = cfqd->cfq_slice_idle;
867         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
868                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
869
870         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
871 }
872
873 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
874 {
875         struct cfq_queue *cfqq = RQ_CFQQ(rq);
876
877         cfq_remove_request(rq);
878         cfqq->dispatched++;
879         elv_dispatch_sort(q, rq);
880 }
881
882 /*
883  * return expired entry, or NULL to just start from scratch in rbtree
884  */
885 static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
886 {
887         struct cfq_data *cfqd = cfqq->cfqd;
888         struct request *rq;
889         int fifo;
890
891         if (cfq_cfqq_fifo_expire(cfqq))
892                 return NULL;
893
894         cfq_mark_cfqq_fifo_expire(cfqq);
895
896         if (list_empty(&cfqq->fifo))
897                 return NULL;
898
899         fifo = cfq_cfqq_sync(cfqq);
900         rq = rq_entry_fifo(cfqq->fifo.next);
901
902         if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
903                 return NULL;
904
905         return rq;
906 }
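
/*
 * With the default tunables this means: the FIFO is consulted at most once
 * per slice (the fifo_expire flag), and a request is considered expired -
 * and dispatched ahead of the sorted order - once it has been queued longer
 * than ~125ms for sync queues or ~250ms for async queues.
 */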
907
908 static inline int
909 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
910 {
911         const int base_rq = cfqd->cfq_slice_async_rq;
912
913         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
914
915         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
916 }
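
/*
 * Worked example with the default cfq_slice_async_rq of 2: an async queue
 * at ioprio 0 may dispatch up to 2 * (2 + 2 * 7) = 32 requests per slice,
 * ioprio 4 up to 16, and ioprio 7 only 4.
 */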
917
918 /*
919  * get next queue for service
920  */
921 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
922 {
923         struct cfq_queue *cfqq;
924
925         cfqq = cfqd->active_queue;
926         if (!cfqq)
927                 goto new_queue;
928
929         /*
930          * The active queue has run out of time, expire it and select new.
931          */
932         if (cfq_slice_used(cfqq))
933                 goto expire;
934
935         /*
936          * The active queue has requests and isn't expired, allow it to
937          * dispatch.
938          */
939         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
940                 goto keep_queue;
941
942         /*
943          * No requests pending. If the active queue still has requests in
944          * flight or is idling for a new request, allow either of these
945          * conditions to happen (or time out) before selecting a new queue.
946          */
947         if (cfqq->dispatched || timer_pending(&cfqd->idle_slice_timer)) {
948                 cfqq = NULL;
949                 goto keep_queue;
950         }
951
952 expire:
953         cfq_slice_expired(cfqd, 0, 0);
954 new_queue:
955         cfqq = cfq_set_active_queue(cfqd);
956 keep_queue:
957         return cfqq;
958 }
959
960 static int
961 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
962                         int max_dispatch)
963 {
964         int dispatched = 0;
965
966         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
967
968         do {
969                 struct request *rq;
970
971                 /*
972                  * follow expired path, else get first next available
973                  */
974                 if ((rq = cfq_check_fifo(cfqq)) == NULL)
975                         rq = cfqq->next_rq;
976
977                 /*
978                  * finally, insert request into driver dispatch list
979                  */
980                 cfq_dispatch_insert(cfqd->queue, rq);
981
982                 cfqd->dispatch_slice++;
983                 dispatched++;
984
985                 if (!cfqd->active_cic) {
986                         atomic_inc(&RQ_CIC(rq)->ioc->refcount);
987                         cfqd->active_cic = RQ_CIC(rq);
988                 }
989
990                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
991                         break;
992
993         } while (dispatched < max_dispatch);
994
995         /*
996          * expire an async queue immediately if it has used up its slice. Idle
997          * queues always expire after one dispatch round.
998          */
999         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1000             cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1001             cfq_class_idle(cfqq))) {
1002                 cfqq->slice_end = jiffies + 1;
1003                 cfq_slice_expired(cfqd, 0, 0);
1004         }
1005
1006         return dispatched;
1007 }
1008
1009 static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1010 {
1011         int dispatched = 0;
1012
1013         while (cfqq->next_rq) {
1014                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1015                 dispatched++;
1016         }
1017
1018         BUG_ON(!list_empty(&cfqq->fifo));
1019         return dispatched;
1020 }
1021
1022 static int cfq_forced_dispatch_cfqqs(struct list_head *list)
1023 {
1024         struct cfq_queue *cfqq, *next;
1025         int dispatched;
1026
1027         dispatched = 0;
1028         list_for_each_entry_safe(cfqq, next, list, cfq_list)
1029                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1030
1031         return dispatched;
1032 }
1033
1034 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1035 {
1036         int dispatched = 0;
1037         struct rb_node *n;
1038
1039         while ((n = rb_first(&cfqd->service_tree)) != NULL) {
1040                 struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
1041
1042                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1043         }
1044
1045         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
1046         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
1047
1048         cfq_slice_expired(cfqd, 0, 0);
1049
1050         BUG_ON(cfqd->busy_queues);
1051
1052         return dispatched;
1053 }
1054
1055 static int cfq_dispatch_requests(request_queue_t *q, int force)
1056 {
1057         struct cfq_data *cfqd = q->elevator->elevator_data;
1058         struct cfq_queue *cfqq;
1059         int dispatched;
1060
1061         if (!cfqd->busy_queues)
1062                 return 0;
1063
1064         if (unlikely(force))
1065                 return cfq_forced_dispatch(cfqd);
1066
1067         dispatched = 0;
1068         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1069                 int max_dispatch;
1070
1071                 if (cfqd->busy_queues > 1) {
1072                         /*
1073                          * If we have already dispatched in this round and the next
1074                          * queue has idling enabled (it must be sync), don't allow it
1075                          * service until the previously dispatched requests complete.
1076                          */
1077                         if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq) &&
1078                             dispatched)
1079                                 break;
1080                         if (cfqq->dispatched >= cfqd->cfq_quantum)
1081                                 break;
1082                 }
1083
1084                 cfq_clear_cfqq_must_dispatch(cfqq);
1085                 cfq_clear_cfqq_wait_request(cfqq);
1086                 del_timer(&cfqd->idle_slice_timer);
1087
1088                 max_dispatch = cfqd->cfq_quantum;
1089                 if (cfq_class_idle(cfqq))
1090                         max_dispatch = 1;
1091
1092                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1093         }
1094
1095         return dispatched;
1096 }
1097
1098 /*
1099  * task holds one reference to the queue, dropped when task exits. each rq
1100  * in-flight on this queue also holds a reference, dropped when rq is freed.
1101  *
1102  * queue lock must be held here.
1103  */
1104 static void cfq_put_queue(struct cfq_queue *cfqq)
1105 {
1106         struct cfq_data *cfqd = cfqq->cfqd;
1107
1108         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1109
1110         if (!atomic_dec_and_test(&cfqq->ref))
1111                 return;
1112
1113         BUG_ON(rb_first(&cfqq->sort_list));
1114         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1115         BUG_ON(cfq_cfqq_on_rr(cfqq));
1116
1117         if (unlikely(cfqd->active_queue == cfqq)) {
1118                 __cfq_slice_expired(cfqd, cfqq, 0, 0);
1119                 cfq_schedule_dispatch(cfqd);
1120         }
1121
1122         /*
1123          * it's on the empty list and still hashed
1124          */
1125         hlist_del(&cfqq->cfq_hash);
1126         kmem_cache_free(cfq_pool, cfqq);
1127 }
1128
1129 static struct cfq_queue *
1130 __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1131                     const int hashval)
1132 {
1133         struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1134         struct hlist_node *entry;
1135         struct cfq_queue *__cfqq;
1136
1137         hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1138                 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1139
1140                 if (__cfqq->key == key && (__p == prio || !prio))
1141                         return __cfqq;
1142         }
1143
1144         return NULL;
1145 }
1146
1147 static struct cfq_queue *
1148 cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1149 {
1150         return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1151 }
1152
1153 static void cfq_free_io_context(struct io_context *ioc)
1154 {
1155         struct cfq_io_context *__cic;
1156         struct rb_node *n;
1157         int freed = 0;
1158
1159         while ((n = rb_first(&ioc->cic_root)) != NULL) {
1160                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1161                 rb_erase(&__cic->rb_node, &ioc->cic_root);
1162                 kmem_cache_free(cfq_ioc_pool, __cic);
1163                 freed++;
1164         }
1165
1166         elv_ioc_count_mod(ioc_count, -freed);
1167
1168         if (ioc_gone && !elv_ioc_count_read(ioc_count))
1169                 complete(ioc_gone);
1170 }
1171
1172 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1173 {
1174         if (unlikely(cfqq == cfqd->active_queue)) {
1175                 __cfq_slice_expired(cfqd, cfqq, 0, 0);
1176                 cfq_schedule_dispatch(cfqd);
1177         }
1178
1179         cfq_put_queue(cfqq);
1180 }
1181
1182 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1183                                          struct cfq_io_context *cic)
1184 {
1185         list_del_init(&cic->queue_list);
1186         smp_wmb();
1187         cic->key = NULL;
1188
1189         if (cic->cfqq[ASYNC]) {
1190                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1191                 cic->cfqq[ASYNC] = NULL;
1192         }
1193
1194         if (cic->cfqq[SYNC]) {
1195                 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1196                 cic->cfqq[SYNC] = NULL;
1197         }
1198 }
1199
1200
1201 /*
1202  * Called with interrupts disabled
1203  */
1204 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1205 {
1206         struct cfq_data *cfqd = cic->key;
1207
1208         if (cfqd) {
1209                 request_queue_t *q = cfqd->queue;
1210
1211                 spin_lock_irq(q->queue_lock);
1212                 __cfq_exit_single_io_context(cfqd, cic);
1213                 spin_unlock_irq(q->queue_lock);
1214         }
1215 }
1216
1217 static void cfq_exit_io_context(struct io_context *ioc)
1218 {
1219         struct cfq_io_context *__cic;
1220         struct rb_node *n;
1221
1222         /*
1223          * put the reference this task is holding to the various queues
1224          */
1225
1226         n = rb_first(&ioc->cic_root);
1227         while (n != NULL) {
1228                 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1229
1230                 cfq_exit_single_io_context(__cic);
1231                 n = rb_next(n);
1232         }
1233 }
1234
1235 static struct cfq_io_context *
1236 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1237 {
1238         struct cfq_io_context *cic;
1239
1240         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
1241         if (cic) {
1242                 memset(cic, 0, sizeof(*cic));
1243                 cic->last_end_request = jiffies;
1244                 INIT_LIST_HEAD(&cic->queue_list);
1245                 cic->dtor = cfq_free_io_context;
1246                 cic->exit = cfq_exit_io_context;
1247                 elv_ioc_count_inc(ioc_count);
1248         }
1249
1250         return cic;
1251 }
1252
1253 static void cfq_init_prio_data(struct cfq_queue *cfqq)
1254 {
1255         struct task_struct *tsk = current;
1256         int ioprio_class;
1257
1258         if (!cfq_cfqq_prio_changed(cfqq))
1259                 return;
1260
1261         ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1262         switch (ioprio_class) {
1263                 default:
1264                         printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
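                        /* deliberately falls through to the NONE case below */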
1265                 case IOPRIO_CLASS_NONE:
1266                         /*
1267                          * no prio set, place us in the middle of the BE classes
1268                          */
1269                         cfqq->ioprio = task_nice_ioprio(tsk);
1270                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1271                         break;
1272                 case IOPRIO_CLASS_RT:
1273                         cfqq->ioprio = task_ioprio(tsk);
1274                         cfqq->ioprio_class = IOPRIO_CLASS_RT;
1275                         break;
1276                 case IOPRIO_CLASS_BE:
1277                         cfqq->ioprio = task_ioprio(tsk);
1278                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1279                         break;
1280                 case IOPRIO_CLASS_IDLE:
1281                         cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1282                         cfqq->ioprio = 7;
1283                         cfq_clear_cfqq_idle_window(cfqq);
1284                         break;
1285         }
1286
1287         /*
1288          * keep track of original prio settings in case we have to temporarily
1289          * elevate the priority of this queue
1290          */
1291         cfqq->org_ioprio = cfqq->ioprio;
1292         cfqq->org_ioprio_class = cfqq->ioprio_class;
1293         cfq_clear_cfqq_prio_changed(cfqq);
1294 }
1295
1296 static inline void changed_ioprio(struct cfq_io_context *cic)
1297 {
1298         struct cfq_data *cfqd = cic->key;
1299         struct cfq_queue *cfqq;
1300         unsigned long flags;
1301
1302         if (unlikely(!cfqd))
1303                 return;
1304
1305         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1306
1307         cfqq = cic->cfqq[ASYNC];
1308         if (cfqq) {
1309                 struct cfq_queue *new_cfqq;
1310                 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
1311                                          GFP_ATOMIC);
1312                 if (new_cfqq) {
1313                         cic->cfqq[ASYNC] = new_cfqq;
1314                         cfq_put_queue(cfqq);
1315                 }
1316         }
1317
1318         cfqq = cic->cfqq[SYNC];
1319         if (cfqq)
1320                 cfq_mark_cfqq_prio_changed(cfqq);
1321
1322         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1323 }
1324
1325 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1326 {
1327         struct cfq_io_context *cic;
1328         struct rb_node *n;
1329
1330         ioc->ioprio_changed = 0;
1331
1332         n = rb_first(&ioc->cic_root);
1333         while (n != NULL) {
1334                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1335
1336                 changed_ioprio(cic);
1337                 n = rb_next(n);
1338         }
1339 }
1340
1341 static struct cfq_queue *
1342 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1343               gfp_t gfp_mask)
1344 {
1345         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1346         struct cfq_queue *cfqq, *new_cfqq = NULL;
1347         unsigned short ioprio;
1348
1349 retry:
1350         ioprio = tsk->ioprio;
1351         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1352
1353         if (!cfqq) {
1354                 if (new_cfqq) {
1355                         cfqq = new_cfqq;
1356                         new_cfqq = NULL;
1357                 } else if (gfp_mask & __GFP_WAIT) {
1358                         /*
1359                          * Inform the allocator of the fact that we will
1360                          * just repeat this allocation if it fails, to allow
1361                          * the allocator to do whatever it needs to attempt to
1362                          * free memory.
1363                          */
1364                         spin_unlock_irq(cfqd->queue->queue_lock);
1365                         new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1366                         spin_lock_irq(cfqd->queue->queue_lock);
1367                         goto retry;
1368                 } else {
1369                         cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1370                         if (!cfqq)
1371                                 goto out;
1372                 }
1373
1374                 memset(cfqq, 0, sizeof(*cfqq));
1375
1376                 INIT_HLIST_NODE(&cfqq->cfq_hash);
1377                 INIT_LIST_HEAD(&cfqq->cfq_list);
1378                 RB_CLEAR_NODE(&cfqq->rb_node);
1379                 INIT_LIST_HEAD(&cfqq->fifo);
1380
1381                 cfqq->key = key;
1382                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1383                 atomic_set(&cfqq->ref, 0);
1384                 cfqq->cfqd = cfqd;
1385
1386                 if (key != CFQ_KEY_ASYNC)
1387                         cfq_mark_cfqq_idle_window(cfqq);
1388
1389                 cfq_mark_cfqq_prio_changed(cfqq);
1390                 cfq_mark_cfqq_queue_new(cfqq);
1391                 cfq_init_prio_data(cfqq);
1392         }
1393
1394         if (new_cfqq)
1395                 kmem_cache_free(cfq_pool, new_cfqq);
1396
1397         atomic_inc(&cfqq->ref);
1398 out:
1399         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1400         return cfqq;
1401 }
1402
1403 static void
1404 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1405 {
1406         WARN_ON(!list_empty(&cic->queue_list));
1407         rb_erase(&cic->rb_node, &ioc->cic_root);
1408         kmem_cache_free(cfq_ioc_pool, cic);
1409         elv_ioc_count_dec(ioc_count);
1410 }
1411
1412 static struct cfq_io_context *
1413 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1414 {
1415         struct rb_node *n;
1416         struct cfq_io_context *cic;
1417         void *k, *key = cfqd;
1418
1419 restart:
1420         n = ioc->cic_root.rb_node;
1421         while (n) {
1422                 cic = rb_entry(n, struct cfq_io_context, rb_node);
1423                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1424                 k = cic->key;
1425                 if (unlikely(!k)) {
1426                         cfq_drop_dead_cic(ioc, cic);
1427                         goto restart;
1428                 }
1429
1430                 if (key < k)
1431                         n = n->rb_left;
1432                 else if (key > k)
1433                         n = n->rb_right;
1434                 else
1435                         return cic;
1436         }
1437
1438         return NULL;
1439 }
1440
1441 static inline void
1442 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1443              struct cfq_io_context *cic)
1444 {
1445         struct rb_node **p;
1446         struct rb_node *parent;
1447         struct cfq_io_context *__cic;
1448         unsigned long flags;
1449         void *k;
1450
1451         cic->ioc = ioc;
1452         cic->key = cfqd;
1453
1454 restart:
1455         parent = NULL;
1456         p = &ioc->cic_root.rb_node;
1457         while (*p) {
1458                 parent = *p;
1459                 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1460                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1461                 k = __cic->key;
1462                 if (unlikely(!k)) {
1463                         cfq_drop_dead_cic(ioc, __cic);
1464                         goto restart;
1465                 }
1466
1467                 if (cic->key < k)
1468                         p = &(*p)->rb_left;
1469                 else if (cic->key > k)
1470                         p = &(*p)->rb_right;
1471                 else
1472                         BUG();
1473         }
1474
1475         rb_link_node(&cic->rb_node, parent, p);
1476         rb_insert_color(&cic->rb_node, &ioc->cic_root);
1477
1478         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1479         list_add(&cic->queue_list, &cfqd->cic_list);
1480         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1481 }
1482
1483 /*
1484  * Setup general io context and cfq io context. There can be several cfq
1485  * io contexts per general io context, if this process is doing io to more
1486  * than one device managed by cfq.
1487  */
1488 static struct cfq_io_context *
1489 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1490 {
1491         struct io_context *ioc = NULL;
1492         struct cfq_io_context *cic;
1493
1494         might_sleep_if(gfp_mask & __GFP_WAIT);
1495
1496         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1497         if (!ioc)
1498                 return NULL;
1499
1500         cic = cfq_cic_rb_lookup(cfqd, ioc);
1501         if (cic)
1502                 goto out;
1503
1504         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1505         if (cic == NULL)
1506                 goto err;
1507
1508         cfq_cic_link(cfqd, ioc, cic);
1509 out:
1510         smp_read_barrier_depends();
1511         if (unlikely(ioc->ioprio_changed))
1512                 cfq_ioc_set_ioprio(ioc);
1513
1514         return cic;
1515 err:
1516         put_io_context(ioc);
1517         return NULL;
1518 }
1519
1520 static void
1521 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1522 {
1523         unsigned long elapsed = jiffies - cic->last_end_request;
1524         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1525
1526         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1527         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1528         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1529 }
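
/*
 * Note on the arithmetic above: both counters are fixed-point values scaled
 * by 256 and decayed by 7/8 on every update, so ttime_mean is a
 * recency-weighted average of the gap between a completion and the next
 * request from this context. ttime_samples converges toward 256, and with
 * these constants sample_valid() (> 80) becomes true after roughly three
 * updates.
 */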
1530
1531 static void
1532 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1533                        struct request *rq)
1534 {
1535         sector_t sdist;
1536         u64 total;
1537
1538         if (cic->last_request_pos < rq->sector)
1539                 sdist = rq->sector - cic->last_request_pos;
1540         else
1541                 sdist = cic->last_request_pos - rq->sector;
1542
1543         if (!cic->seek_samples) {
1544                 cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1545                 cfqd->new_seek_mean = cfqd->new_seek_total / 256;
1546         }
1547
1548         /*
1549          * Don't allow the seek distance to get too large from the
1550          * odd fragment, pagein, etc
1551          */
1552         if (cic->seek_samples <= 60) /* second&third seek */
1553                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1554         else
1555                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1556
1557         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1558         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1559         total = cic->seek_total + (cic->seek_samples/2);
1560         do_div(total, cic->seek_samples);
1561         cic->seek_mean = (sector_t)total;
1562 }
1563
1564 /*
1565  * Disable idle window if the process thinks too long or seeks so much that
1566  * it doesn't matter
1567  */
1568 static void
1569 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1570                        struct cfq_io_context *cic)
1571 {
1572         int enable_idle = cfq_cfqq_idle_window(cfqq);
1573
1574         if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
1575             (cfqd->hw_tag && CIC_SEEKY(cic)))
1576                 enable_idle = 0;
1577         else if (sample_valid(cic->ttime_samples)) {
1578                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1579                         enable_idle = 0;
1580                 else
1581                         enable_idle = 1;
1582         }
1583
1584         if (enable_idle)
1585                 cfq_mark_cfqq_idle_window(cfqq);
1586         else
1587                 cfq_clear_cfqq_idle_window(cfqq);
1588 }
1589
1590 /*
1591  * Check if new_cfqq should preempt the currently active queue. Return 0 for
1592  * no (or if we aren't sure); returning 1 will cause a preempt.
1593  */
1594 static int
1595 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1596                    struct request *rq)
1597 {
1598         struct cfq_queue *cfqq;
1599
1600         cfqq = cfqd->active_queue;
1601         if (!cfqq)
1602                 return 0;
1603
1604         if (cfq_slice_used(cfqq))
1605                 return 1;
1606
1607         if (cfq_class_idle(new_cfqq))
1608                 return 0;
1609
1610         if (cfq_class_idle(cfqq))
1611                 return 1;
1612
1613         /*
1614          * if the new request is sync, but the currently running queue is
1615          * not, let the sync request have priority.
1616          */
1617         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1618                 return 1;
1619
1620         /*
1621          * So both queues are sync. Let the new request get disk time if
1622          * it's a metadata request and the current queue is doing regular IO.
1623          */
1624         if (rq_is_meta(rq) && !cfqq->meta_pending)
1625                 return 1;
1626
1627         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
1628                 return 0;
1629
1630         /*
1631          * if this request is as-good as one we would expect from the
1632          * current cfqq, let it preempt
1633          */
1634         if (cfq_rq_close(cfqd, rq))
1635                 return 1;
1636
1637         return 0;
1638 }
1639
1640 /*
1641  * cfqq preempts the active queue. if we allowed preempt with no slice left,
1642  * let it have half of its nominal slice.
1643  */
1644 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1645 {
1646         cfq_slice_expired(cfqd, 1, 1);
1647
1648         /*
1649          * Put the new queue at the front of the current list,
1650          * so we know that it will be selected next.
1651          */
1652         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1653         list_del_init(&cfqq->cfq_list);
1654         list_add(&cfqq->cfq_list, &cfqd->cur_rr);
1655
1656         cfqq->slice_end = 0;
1657         cfq_mark_cfqq_slice_new(cfqq);
1658 }
1659
1660 /*
1661  * Called when a new fs request (rq) is added (to cfqq). Check if there's
1662  * something we should do about it
1663  */
1664 static void
1665 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1666                 struct request *rq)
1667 {
1668         struct cfq_io_context *cic = RQ_CIC(rq);
1669
1670         if (rq_is_meta(rq))
1671                 cfqq->meta_pending++;
1672
1673         cfq_update_io_thinktime(cfqd, cic);
1674         cfq_update_io_seektime(cfqd, cic, rq);
1675         cfq_update_idle_window(cfqd, cfqq, cic);
1676
1677         cic->last_request_pos = rq->sector + rq->nr_sectors;
1678         cfqq->last_request_pos = cic->last_request_pos;
1679
1680         if (cfqq == cfqd->active_queue) {
1681                 /*
1682                  * if we are waiting for a request for this queue, let it rip
1683                  * immediately and flag that we must not expire this queue
1684                  * just now
1685                  */
1686                 if (cfq_cfqq_wait_request(cfqq)) {
1687                         cfq_mark_cfqq_must_dispatch(cfqq);
1688                         del_timer(&cfqd->idle_slice_timer);
1689                         blk_start_queueing(cfqd->queue);
1690                 }
1691         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1692                 /*
1693                  * not the active queue - expire the current slice if it is
1694                  * idle and has expired its mean thinktime, or if this new
1695                  * queue has some old slice time left and is of higher priority
1696                  */
1697                 cfq_preempt_queue(cfqd, cfqq);
1698                 cfq_mark_cfqq_must_dispatch(cfqq);
1699                 blk_start_queueing(cfqd->queue);
1700         }
1701 }
1702
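/*
 * A new request has been handed to us by the block layer: set up the
 * queue's prio data, add the request to the cfqq's sort tree and fifo,
 * then see if anything needs doing about it (preempt, dispatch kick).
 */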
1703 static void cfq_insert_request(request_queue_t *q, struct request *rq)
1704 {
1705         struct cfq_data *cfqd = q->elevator->elevator_data;
1706         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1707
1708         cfq_init_prio_data(cfqq);
1709
1710         cfq_add_rq_rb(rq);
1711
1712         list_add_tail(&rq->queuelist, &cfqq->fifo);
1713
1714         cfq_rq_enqueued(cfqd, cfqq, rq);
1715 }
1716
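/*
 * A request has completed: update the driver/dispatch accounting and the
 * completion timestamps, then decide whether the active queue should be
 * expired or left to idle for more requests.
 */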
1717 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1718 {
1719         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1720         struct cfq_data *cfqd = cfqq->cfqd;
1721         const int sync = rq_is_sync(rq);
1722         unsigned long now;
1723
1724         now = jiffies;
1725
1726         WARN_ON(!cfqd->rq_in_driver);
1727         WARN_ON(!cfqq->dispatched);
1728         cfqd->rq_in_driver--;
1729         cfqq->dispatched--;
1730
1731         if (!cfq_class_idle(cfqq))
1732                 cfqd->last_end_request = now;
1733
1734         if (sync)
1735                 RQ_CIC(rq)->last_end_request = now;
1736
1737         /*
1738          * If this is the active queue, check if it needs to be expired,
1739          * or if we want to idle in case it has no pending requests.
1740          */
1741         if (cfqd->active_queue == cfqq) {
1742                 if (cfq_cfqq_slice_new(cfqq)) {
1743                         cfq_set_prio_slice(cfqd, cfqq);
1744                         cfq_clear_cfqq_slice_new(cfqq);
1745                 }
1746                 if (cfq_slice_used(cfqq))
1747                         cfq_slice_expired(cfqd, 0, 1);
1748                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
1749                         cfq_arm_slice_timer(cfqd);
1750         }
1751
1752         if (!cfqd->rq_in_driver)
1753                 cfq_schedule_dispatch(cfqd);
1754 }
1755
1756 /*
1757  * We temporarily boost lower priority queues if they are holding fs exclusive
1758  * resources; they are boosted to normal prio (CLASS_BE/4).
1759  */
1760 static void cfq_prio_boost(struct cfq_queue *cfqq)
1761 {
1762         if (has_fs_excl()) {
1763                 /*
1764                  * boost idle prio on transactions that would lock out other
1765                  * users of the filesystem
1766                  */
1767                 if (cfq_class_idle(cfqq))
1768                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1769                 if (cfqq->ioprio > IOPRIO_NORM)
1770                         cfqq->ioprio = IOPRIO_NORM;
1771         } else {
1772                 /*
1773                  * check if we need to unboost the queue
1774                  */
1775                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1776                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1777                 if (cfqq->ioprio != cfqq->org_ioprio)
1778                         cfqq->ioprio = cfqq->org_ioprio;
1779         }
1780 }
1781
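/*
 * If the queue is waiting on a request (or must allocate one) and has not
 * yet been granted a "must allocate" pass for this slice, force the
 * allocation through; otherwise just say that we may queue.
 */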
1782 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1783 {
1784         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1785             !cfq_cfqq_must_alloc_slice(cfqq)) {
1786                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1787                 return ELV_MQUEUE_MUST;
1788         }
1789
1790         return ELV_MQUEUE_MAY;
1791 }
1792
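/*
 * Called via the elevator may_queue hook to ask whether this task may
 * queue another request right now. Look up its cfqq, if one exists, and
 * base the answer on that queue's state.
 */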
1793 static int cfq_may_queue(request_queue_t *q, int rw)
1794 {
1795         struct cfq_data *cfqd = q->elevator->elevator_data;
1796         struct task_struct *tsk = current;
1797         struct cfq_queue *cfqq;
1798         unsigned int key;
1799
1800         key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1801
1802         /*
1803          * don't force setup of a queue from here, as a call to may_queue
1804          * does not necessarily imply that a request actually will be queued.
1805          * so just look up a possibly existing queue, or return 'may queue'
1806          * if that fails
1807          */
1808         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1809         if (cfqq) {
1810                 cfq_init_prio_data(cfqq);
1811                 cfq_prio_boost(cfqq);
1812
1813                 return __cfq_may_queue(cfqq);
1814         }
1815
1816         return ELV_MQUEUE_MAY;
1817 }
1818
1819 /*
1820  * queue lock held here
1821  */
1822 static void cfq_put_request(struct request *rq)
1823 {
1824         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1825
1826         if (cfqq) {
1827                 const int rw = rq_data_dir(rq);
1828
1829                 BUG_ON(!cfqq->allocated[rw]);
1830                 cfqq->allocated[rw]--;
1831
1832                 put_io_context(RQ_CIC(rq)->ioc);
1833
1834                 rq->elevator_private = NULL;
1835                 rq->elevator_private2 = NULL;
1836
1837                 cfq_put_queue(cfqq);
1838         }
1839 }
1840
1841 /*
1842  * Allocate cfq data structures associated with this request.
1843  */
1844 static int
1845 cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
1846 {
1847         struct cfq_data *cfqd = q->elevator->elevator_data;
1848         struct task_struct *tsk = current;
1849         struct cfq_io_context *cic;
1850         const int rw = rq_data_dir(rq);
1851         const int is_sync = rq_is_sync(rq);
1852         pid_t key = cfq_queue_pid(tsk, rw, is_sync);
1853         struct cfq_queue *cfqq;
1854         unsigned long flags;
1855
1856         might_sleep_if(gfp_mask & __GFP_WAIT);
1857
1858         cic = cfq_get_io_context(cfqd, gfp_mask);
1859
1860         spin_lock_irqsave(q->queue_lock, flags);
1861
1862         if (!cic)
1863                 goto queue_fail;
1864
1865         if (!cic->cfqq[is_sync]) {
1866                 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1867                 if (!cfqq)
1868                         goto queue_fail;
1869
1870                 cic->cfqq[is_sync] = cfqq;
1871         } else
1872                 cfqq = cic->cfqq[is_sync];
1873
1874         cfqq->allocated[rw]++;
1875         cfq_clear_cfqq_must_alloc(cfqq);
1876         atomic_inc(&cfqq->ref);
1877
1878         spin_unlock_irqrestore(q->queue_lock, flags);
1879
1880         rq->elevator_private = cic;
1881         rq->elevator_private2 = cfqq;
1882         return 0;
1883
1884 queue_fail:
1885         if (cic)
1886                 put_io_context(cic->ioc);
1887
1888         cfq_schedule_dispatch(cfqd);
1889         spin_unlock_irqrestore(q->queue_lock, flags);
1890         return 1;
1891 }
1892
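/*
 * Work handler for deferred dispatching: restart the request queue with
 * the queue lock held.
 */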
1893 static void cfq_kick_queue(struct work_struct *work)
1894 {
1895         struct cfq_data *cfqd =
1896                 container_of(work, struct cfq_data, unplug_work);
1897         request_queue_t *q = cfqd->queue;
1898         unsigned long flags;
1899
1900         spin_lock_irqsave(q->queue_lock, flags);
1901         blk_start_queueing(q);
1902         spin_unlock_irqrestore(q->queue_lock, flags);
1903 }
1904
1905 /*
1906  * Timer running if the active_queue is currently idling inside its time slice
1907  */
1908 static void cfq_idle_slice_timer(unsigned long data)
1909 {
1910         struct cfq_data *cfqd = (struct cfq_data *) data;
1911         struct cfq_queue *cfqq;
1912         unsigned long flags;
1913         int timed_out = 1;
1914
1915         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1916
1917         if ((cfqq = cfqd->active_queue) != NULL) {
1918                 timed_out = 0;
1919
1920                 /*
1921                  * expired
1922                  */
1923                 if (cfq_slice_used(cfqq))
1924                         goto expire;
1925
1926                 /*
1927                  * only expire and reinvoke the request handler if there are
1928                  * other queues with pending requests
1929                  */
1930                 if (!cfqd->busy_queues)
1931                         goto out_cont;
1932
1933                 /*
1934                  * not expired and it has a request pending, let it dispatch
1935                  */
1936                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
1937                         cfq_mark_cfqq_must_dispatch(cfqq);
1938                         goto out_kick;
1939                 }
1940         }
1941 expire:
1942         cfq_slice_expired(cfqd, 0, timed_out);
1943 out_kick:
1944         cfq_schedule_dispatch(cfqd);
1945 out_cont:
1946         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1947 }
1948
1949 /*
1950  * Timer running if an idle class queue is waiting for service
1951  */
1952 static void cfq_idle_class_timer(unsigned long data)
1953 {
1954         struct cfq_data *cfqd = (struct cfq_data *) data;
1955         unsigned long flags, end;
1956
1957         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1958
1959         /*
1960          * race with a non-idle queue, reset timer
1961          */
1962         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
1963         if (!time_after_eq(jiffies, end))
1964                 mod_timer(&cfqd->idle_class_timer, end);
1965         else
1966                 cfq_schedule_dispatch(cfqd);
1967
1968         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1969 }
1970
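/*
 * Make sure our timers and any pending unplug work have finished before
 * teardown continues.
 */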
1971 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
1972 {
1973         del_timer_sync(&cfqd->idle_slice_timer);
1974         del_timer_sync(&cfqd->idle_class_timer);
1975         blk_sync_queue(cfqd->queue);
1976 }
1977
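/*
 * Tear down the elevator private data: stop timers and work, expire the
 * active queue, drop all attached io contexts, then free the hash and cfqd.
 */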
1978 static void cfq_exit_queue(elevator_t *e)
1979 {
1980         struct cfq_data *cfqd = e->elevator_data;
1981         request_queue_t *q = cfqd->queue;
1982
1983         cfq_shutdown_timer_wq(cfqd);
1984
1985         spin_lock_irq(q->queue_lock);
1986
1987         if (cfqd->active_queue)
1988                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0, 0);
1989
1990         while (!list_empty(&cfqd->cic_list)) {
1991                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
1992                                                         struct cfq_io_context,
1993                                                         queue_list);
1994
1995                 __cfq_exit_single_io_context(cfqd, cic);
1996         }
1997
1998         spin_unlock_irq(q->queue_lock);
1999
2000         cfq_shutdown_timer_wq(cfqd);
2001
2002         kfree(cfqd->cfq_hash);
2003         kfree(cfqd);
2004 }
2005
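/*
 * Allocate and initialize the per-queue cfq data: service tree, rr lists,
 * cfqq hash, timers, unplug work and the default tunables.
 */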
2006 static void *cfq_init_queue(request_queue_t *q)
2007 {
2008         struct cfq_data *cfqd;
2009         int i;
2010
2011         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
2012         if (!cfqd)
2013                 return NULL;
2014
2015         memset(cfqd, 0, sizeof(*cfqd));
2016
2017         cfqd->service_tree = RB_ROOT;
2018         INIT_LIST_HEAD(&cfqd->cur_rr);
2019         INIT_LIST_HEAD(&cfqd->idle_rr);
2020         INIT_LIST_HEAD(&cfqd->cic_list);
2021
2022         cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
2023         if (!cfqd->cfq_hash)
2024                 goto out_free;
2025
2026         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2027                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2028
2029         cfqd->queue = q;
2030
2031         init_timer(&cfqd->idle_slice_timer);
2032         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2033         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2034
2035         init_timer(&cfqd->idle_class_timer);
2036         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2037         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2038
2039         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2040
2041         cfqd->cfq_quantum = cfq_quantum;
2042         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2043         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2044         cfqd->cfq_back_max = cfq_back_max;
2045         cfqd->cfq_back_penalty = cfq_back_penalty;
2046         cfqd->cfq_slice[0] = cfq_slice_async;
2047         cfqd->cfq_slice[1] = cfq_slice_sync;
2048         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2049         cfqd->cfq_slice_idle = cfq_slice_idle;
2050
2051         return cfqd;
2052 out_free:
2053         kfree(cfqd);
2054         return NULL;
2055 }
2056
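/*
 * Free the slab caches used for cfq_queue and cfq_io_context objects.
 */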
2057 static void cfq_slab_kill(void)
2058 {
2059         if (cfq_pool)
2060                 kmem_cache_destroy(cfq_pool);
2061         if (cfq_ioc_pool)
2062                 kmem_cache_destroy(cfq_ioc_pool);
2063 }
2064
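/*
 * Create the slab caches; on failure, undo whatever was already set up.
 */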
2065 static int __init cfq_slab_setup(void)
2066 {
2067         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2068                                         NULL, NULL);
2069         if (!cfq_pool)
2070                 goto fail;
2071
2072         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2073                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2074         if (!cfq_ioc_pool)
2075                 goto fail;
2076
2077         return 0;
2078 fail:
2079         cfq_slab_kill();
2080         return -ENOMEM;
2081 }
2082
2083 /*
2084  * sysfs parts below -->
2085  */
2086 static ssize_t
2087 cfq_var_show(unsigned int var, char *page)
2088 {
2089         return sprintf(page, "%d\n", var);
2090 }
2091
2092 static ssize_t
2093 cfq_var_store(unsigned int *var, const char *page, size_t count)
2094 {
2095         char *p = (char *) page;
2096
2097         *var = simple_strtoul(p, &p, 10);
2098         return count;
2099 }
2100
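/*
 * Generate the sysfs show handlers for the tunables; if __CONV is set,
 * the internal jiffies value is reported in milliseconds.
 */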
2101 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2102 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2103 {                                                                       \
2104         struct cfq_data *cfqd = e->elevator_data;                       \
2105         unsigned int __data = __VAR;                                    \
2106         if (__CONV)                                                     \
2107                 __data = jiffies_to_msecs(__data);                      \
2108         return cfq_var_show(__data, (page));                            \
2109 }
2110 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2111 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2112 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2113 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2114 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2115 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2116 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2117 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2118 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2119 #undef SHOW_FUNCTION
2120
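/*
 * Generate the matching sysfs store handlers: parse the value, clamp it
 * to [MIN, MAX] and convert from milliseconds to jiffies if __CONV is set.
 */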
2121 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2122 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2123 {                                                                       \
2124         struct cfq_data *cfqd = e->elevator_data;                       \
2125         unsigned int __data;                                            \
2126         int ret = cfq_var_store(&__data, (page), count);                \
2127         if (__data < (MIN))                                             \
2128                 __data = (MIN);                                         \
2129         else if (__data > (MAX))                                        \
2130                 __data = (MAX);                                         \
2131         if (__CONV)                                                     \
2132                 *(__PTR) = msecs_to_jiffies(__data);                    \
2133         else                                                            \
2134                 *(__PTR) = __data;                                      \
2135         return ret;                                                     \
2136 }
2137 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2138 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2139 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2140 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2141 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2142 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2143 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2144 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2145 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2146 #undef STORE_FUNCTION
2147
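/*
 * Convenience wrapper tying each tunable name to its show/store handlers.
 */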
2148 #define CFQ_ATTR(name) \
2149         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2150
2151 static struct elv_fs_entry cfq_attrs[] = {
2152         CFQ_ATTR(quantum),
2153         CFQ_ATTR(fifo_expire_sync),
2154         CFQ_ATTR(fifo_expire_async),
2155         CFQ_ATTR(back_seek_max),
2156         CFQ_ATTR(back_seek_penalty),
2157         CFQ_ATTR(slice_sync),
2158         CFQ_ATTR(slice_async),
2159         CFQ_ATTR(slice_async_rq),
2160         CFQ_ATTR(slice_idle),
2161         __ATTR_NULL
2162 };
2163
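/*
 * The elevator type descriptor registered with the block layer in cfq_init().
 */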
2164 static struct elevator_type iosched_cfq = {
2165         .ops = {
2166                 .elevator_merge_fn =            cfq_merge,
2167                 .elevator_merged_fn =           cfq_merged_request,
2168                 .elevator_merge_req_fn =        cfq_merged_requests,
2169                 .elevator_allow_merge_fn =      cfq_allow_merge,
2170                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2171                 .elevator_add_req_fn =          cfq_insert_request,
2172                 .elevator_activate_req_fn =     cfq_activate_request,
2173                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2174                 .elevator_queue_empty_fn =      cfq_queue_empty,
2175                 .elevator_completed_req_fn =    cfq_completed_request,
2176                 .elevator_former_req_fn =       elv_rb_former_request,
2177                 .elevator_latter_req_fn =       elv_rb_latter_request,
2178                 .elevator_set_req_fn =          cfq_set_request,
2179                 .elevator_put_req_fn =          cfq_put_request,
2180                 .elevator_may_queue_fn =        cfq_may_queue,
2181                 .elevator_init_fn =             cfq_init_queue,
2182                 .elevator_exit_fn =             cfq_exit_queue,
2183                 .trim =                         cfq_free_io_context,
2184         },
2185         .elevator_attrs =       cfq_attrs,
2186         .elevator_name =        "cfq",
2187         .elevator_owner =       THIS_MODULE,
2188 };
2189
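/*
 * Module init: sanitize the jiffies-based defaults, set up the slab caches
 * and register the elevator.
 */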
2190 static int __init cfq_init(void)
2191 {
2192         int ret;
2193
2194         /*
2195          * could be 0 on HZ < 1000 setups
2196          */
2197         if (!cfq_slice_async)
2198                 cfq_slice_async = 1;
2199         if (!cfq_slice_idle)
2200                 cfq_slice_idle = 1;
2201
2202         if (cfq_slab_setup())
2203                 return -ENOMEM;
2204
2205         ret = elv_register(&iosched_cfq);
2206         if (ret)
2207                 cfq_slab_kill();
2208
2209         return ret;
2210 }
2211
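/*
 * Module exit: unregister the elevator and wait for all io contexts to be
 * freed before killing the slab caches.
 */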
2212 static void __exit cfq_exit(void)
2213 {
2214         DECLARE_COMPLETION_ONSTACK(all_gone);
2215         elv_unregister(&iosched_cfq);
2216         ioc_gone = &all_gone;
2217         /* ioc_gone's update must be visible before reading ioc_count */
2218         smp_wmb();
2219         if (elv_ioc_count_read(ioc_count))
2220                 wait_for_completion(ioc_gone);
2221         synchronize_rcu();
2222         cfq_slab_kill();
2223 }
2224
2225 module_init(cfq_init);
2226 module_exit(cfq_exit);
2227
2228 MODULE_AUTHOR("Jens Axboe");
2229 MODULE_LICENSE("GPL");
2230 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");