cfq-iosched: improve preemption for cooperating tasks
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b6491c0..bfb3967 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -532,6 +532,12 @@ static void cfq_add_rq_rb(struct request *rq)
 
        if (!cfq_cfqq_on_rr(cfqq))
                cfq_add_cfqq_rr(cfqd, cfqq);
+
+       /*
+        * check if this request is a better next-serve candidate
+        */
+       cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+       BUG_ON(!cfqq->next_rq);
 }
 
 static inline void
@@ -861,15 +867,11 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd)
 
 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
 {
-       struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
        cfq_remove_request(rq);
        cfqq->on_dispatch[rq_is_sync(rq)]++;
        elv_dispatch_sort(q, rq);
-
-       rq = list_entry(q->queue_head.prev, struct request, queuelist);
-       cfqd->last_sector = rq->sector + rq->nr_sectors;
 }
 
 /*
@@ -986,9 +988,9 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         * expire an async queue immediately if it has used up its slice. idle
         * queue always expire after 1 dispatch round.
         */
-       if ((!cfq_cfqq_sync(cfqq) &&
+       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
            cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq)) {
+           cfq_class_idle(cfqq))) {
                cfqq->slice_end = jiffies + 1;
                cfq_slice_expired(cfqd, 0, 0);
        }
@@ -1051,19 +1053,21 @@ cfq_dispatch_requests(request_queue_t *q, int force)
        while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
                int max_dispatch;
 
-               /*
-                * Don't repeat dispatch from the previous queue.
-                */
-               if (prev_cfqq == cfqq)
-                       break;
+               if (cfqd->busy_queues > 1) {
+                       /*
+                        * Don't repeat dispatch from the previous queue.
+                        */
+                       if (prev_cfqq == cfqq)
+                               break;
 
-               /*
-                * So we have dispatched before in this round, if the
-                * next queue has idling enabled (must be sync), don't
-                * allow it service until the previous have continued.
-                */
-               if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
-                       break;
+                       /*
+                        * So we have dispatched before in this round, if the
+                        * next queue has idling enabled (must be sync), don't
+                        * allow it service until the previous have continued.
+                        */
+                       if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+                               break;
+               }
 
                cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
@@ -1370,7 +1374,9 @@ retry:
                atomic_set(&cfqq->ref, 0);
                cfqq->cfqd = cfqd;
 
-               cfq_mark_cfqq_idle_window(cfqq);
+               if (key != CFQ_KEY_ASYNC)
+                       cfq_mark_cfqq_idle_window(cfqq);
+
                cfq_mark_cfqq_prio_changed(cfqq);
                cfq_mark_cfqq_queue_new(cfqq);
                cfq_init_prio_data(cfqq);
@@ -1575,6 +1581,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                   struct request *rq)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
+       sector_t dist;
 
        if (cfq_class_idle(new_cfqq))
                return 0;
@@ -1584,14 +1591,14 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
        if (cfq_class_idle(cfqq))
                return 1;
-       if (!cfq_cfqq_wait_request(new_cfqq))
-               return 0;
+
        /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
                return 1;
+
        /*
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
@@ -1599,6 +1606,21 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
        if (rq_is_meta(rq) && !cfqq->meta_pending)
                return 1;
 
+       if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
+               return 0;
+
+       /*
+        * if this request is as-good as one we would expect from the
+        * current cfqq, let it preempt
+        */
+       if (rq->sector > cfqd->last_sector)
+               dist = rq->sector - cfqd->last_sector;
+       else
+               dist = cfqd->last_sector - rq->sector;
+
+       if (dist <= cfqd->active_cic->seek_mean)
+               return 1;
+
        return 0;
 }
 
@@ -1634,12 +1656,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        if (rq_is_meta(rq))
                cfqq->meta_pending++;
 
-       /*
-        * check if this request is a better next-serve candidate
-        */
-       cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
-       BUG_ON(!cfqq->next_rq);
-
        /*
         * we never wait for an async request and we don't allow preemption
         * of an async request. so just return early
@@ -1715,6 +1731,8 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
        cfqq->on_dispatch[sync]--;
        cfqq->service_last = now;
 
+       cfqd->last_sector = rq->hard_sector + rq->hard_nr_sectors;
+
        if (!cfq_class_idle(cfqq))
                cfqd->last_end_request = now;
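
---

For context on the new preemption test in cfq_should_preempt(): it asks whether the incoming
request lands within the active task's mean seek distance of the sector that last completed,
and if so treats it as a cooperating request that may preempt the idling queue. The snippet
below is a minimal user-space sketch of just that distance check; struct cic_stats,
close_enough_to_preempt() and the sector values are made up for illustration and are not
kernel API.

    /* distance_check.c - illustrative sketch only, not kernel code */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* stand-in for the per-task io context stats (cic->seek_mean above) */
    struct cic_stats {
            sector_t seek_mean;     /* running mean seek distance of the task */
    };

    /*
     * Mirror of the test added to cfq_should_preempt(): take the absolute
     * distance between the new request and the last completed sector
     * (sector_t is unsigned, so branch instead of calling abs()) and
     * allow preemption when it falls within the task's typical seek range.
     */
    static int close_enough_to_preempt(const struct cic_stats *cic,
                                       sector_t last_sector, sector_t rq_sector)
    {
            sector_t dist;

            if (rq_sector > last_sector)
                    dist = rq_sector - last_sector;
            else
                    dist = last_sector - rq_sector;

            return dist <= cic->seek_mean;
    }

    int main(void)
    {
            struct cic_stats cic = { .seek_mean = 128 };

            /* a request 64 sectors past the last completion: may preempt */
            printf("%d\n", close_enough_to_preempt(&cic, 1000, 1064));
            /* a request 5000 sectors away: no preemption */
            printf("%d\n", close_enough_to_preempt(&cic, 1000, 6000));
            return 0;
    }

Note that in the patch itself the distance test is only reached when cfqd->active_cic is set
and the active queue is in cfq_cfqq_wait_request(), i.e. only a queue that is idling while
waiting for its next request can be preempted this way.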