diff --git a/block/elevator.c b/block/elevator.c
index 6c3fc8a..24b702d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -64,7 +64,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-inline int elv_try_merge(struct request *__rq, struct bio *bio)
+static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 {
        int ret = ELEVATOR_NO_MERGE;
 
@@ -80,7 +80,6 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
        return ret;
 }
-EXPORT_SYMBOL(elv_try_merge);
 
 static struct elevator_type *elevator_find(const char *name)
 {
@@ -140,28 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 
 static char chosen_elevator[16];
 
-static void elevator_setup_default(void)
+static int __init elevator_setup(char *str)
 {
-       struct elevator_type *e;
-
        /*
-        * If default has not been set, use the compiled-in selection.
+        * Be backwards-compatible with previous kernels, so users
+        * won't get the wrong elevator.
         */
-       if (!chosen_elevator[0])
-               strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
-       /*
-        * If the given scheduler is not available, fall back to no-op.
-        */
-       if ((e = elevator_find(chosen_elevator)))
-               elevator_put(e);
+       if (!strcmp(str, "as"))
+               strcpy(chosen_elevator, "anticipatory");
        else
-               strcpy(chosen_elevator, "noop");
-}
-
-static int __init elevator_setup(char *str)
-{
-       strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+               strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 0;
 }
 
@@ -178,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
        q->end_sector = 0;
        q->boundary_rq = NULL;
 
-       elevator_setup_default();
+       if (name && !(e = elevator_get(name)))
+               return -EINVAL;
 
-       if (!name)
-               name = chosen_elevator;
+       if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
+               printk("I/O scheduler %s not found\n", chosen_elevator);
 
-       e = elevator_get(name);
-       if (!e)
-               return -EINVAL;
+       if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
+               printk("Default I/O scheduler not found, using no-op\n");
+               e = elevator_get("noop");
+       }
 
        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
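
Note on the hunk above: elevator_setup_default() is gone; elevator_init() now resolves the scheduler itself with an explicit fallback chain. A name passed in by the caller is mandatory (lookup failure is -EINVAL), the boot-time "elevator=" choice (including the legacy "as" alias mapped to "anticipatory" in the previous hunk) is only a preference that produces a warning if unavailable, and CONFIG_DEFAULT_IOSCHED finally degrades to "noop". A minimal userspace sketch of that chain follows; lookup() and pick() are hypothetical stand-ins for elevator_get() and the logic above, not kernel APIs.

    #include <stdio.h>
    #include <string.h>

    /* hypothetical stand-in for elevator_get(): NULL if not registered */
    static const char *lookup(const char *name)
    {
            static const char *registered[] = { "noop", "deadline", "cfq" };
            unsigned i;

            for (i = 0; i < sizeof(registered) / sizeof(*registered); i++)
                    if (name && !strcmp(name, registered[i]))
                            return registered[i];
            return NULL;
    }

    /* mirrors the fallback order in elevator_init() above */
    static const char *pick(const char *name, const char *chosen,
                            const char *compiled_default)
    {
            const char *e = NULL;

            if (name && !(e = lookup(name)))
                    return NULL;                    /* -EINVAL in the kernel */
            if (!e && *chosen && !(e = lookup(chosen)))
                    printf("I/O scheduler %s not found\n", chosen);
            if (!e && !(e = lookup(compiled_default))) {
                    printf("Default I/O scheduler not found, using no-op\n");
                    e = lookup("noop");
            }
            return e;
    }

    int main(void)
    {
            /* boot-time choice unavailable -> warning, then compiled default */
            printf("-> %s\n", pick(NULL, "anticipatory", "cfq"));
            return 0;
    }
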
@@ -304,15 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
        rq->flags &= ~REQ_STARTED;
 
-       /*
-        * if this is the flush, requeue the original instead and drop the flush
-        */
-       if (rq->flags & REQ_BAR_FLUSH) {
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               rq = rq->end_io_data;
-       }
-
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -329,28 +310,10 @@ static void elv_drain_elevator(request_queue_t *q)
        }
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-                      int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-               /*
-                * barriers implicitly indicate back insertion
-                */
-               if (where == ELEVATOR_INSERT_SORT)
-                       where = ELEVATOR_INSERT_BACK;
-
-               /*
-                * this request is scheduling boundary, update end_sector
-                */
-               if (blk_fs_request(rq)) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = rq;
-               }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-               where = ELEVATOR_INSERT_BACK;
-
-       if (plug)
-               blk_plug_device(q);
+       struct list_head *pos;
+       unsigned ordseq;
 
        rq->q = q;
 
@@ -393,6 +356,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;
 
+       case ELEVATOR_INSERT_REQUEUE:
+               /*
+                * If ordered flush isn't in progress, we do front
+                * insertion; otherwise, requests should be requeued
+                * in ordseq order.
+                */
+               rq->flags |= REQ_SOFTBARRIER;
+
+               if (q->ordseq == 0) {
+                       list_add(&rq->queuelist, &q->queue_head);
+                       break;
+               }
+
+               ordseq = blk_ordered_req_seq(rq);
+
+               list_for_each(pos, &q->queue_head) {
+                       struct request *pos_rq = list_entry_rq(pos);
+                       if (ordseq <= blk_ordered_req_seq(pos_rq))
+                               break;
+               }
+
+               list_add_tail(&rq->queuelist, pos);
+               break;
+
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
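
The new ELEVATOR_INSERT_REQUEUE case front-inserts when no ordered sequence is active; while a flush sequence is in progress (q->ordseq != 0) it instead walks queue_head and places the request before the first entry whose blk_ordered_req_seq() value is greater than or equal to its own, so requeued requests stay in sequence order. A small userspace model of that insertion-point search, using a plain array of sequence numbers instead of struct request (purely illustrative, not kernel code):

    #include <stdio.h>

    /* Insert seq before the first element >= seq (or at the end): the same
     * position the list_for_each() + list_add_tail() pair above selects. */
    static int requeue(int *queue, int len, int seq)
    {
            int pos = 0, i;

            while (pos < len && queue[pos] < seq)
                    pos++;
            for (i = len; i > pos; i--)
                    queue[i] = queue[i - 1];
            queue[pos] = seq;
            return len + 1;
    }

    int main(void)
    {
            int queue[8] = { 1, 1, 2, 3 };
            int i, len = requeue(queue, 4, 2);

            for (i = 0; i < len; i++)
                    printf("%d ", queue[i]);    /* 1 1 2 2 3 */
            printf("\n");
            return 0;
    }
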
@@ -408,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
        }
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+                      int plug)
+{
+       if (q->ordcolor)
+               rq->flags |= REQ_ORDERED_COLOR;
+
+       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * toggle ordered color
+                */
+               if (blk_barrier_rq(rq))
+                       q->ordcolor ^= 1;
+
+               /*
+                * barriers implicitly indicate back insertion
+                */
+               if (where == ELEVATOR_INSERT_SORT)
+                       where = ELEVATOR_INSERT_BACK;
+
+               /*
+                * this request is scheduling boundary, update
+                * end_sector
+                */
+               if (blk_fs_request(rq)) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = rq;
+               }
+       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+               where = ELEVATOR_INSERT_BACK;
+
+       if (plug)
+               blk_plug_device(q);
+
+       elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
 {
@@ -422,25 +445,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 {
        struct request *rq;
 
-       if (unlikely(list_empty(&q->queue_head) &&
-                    !q->elevator->ops->elevator_dispatch_fn(q, 0)))
-               return NULL;
-
-       rq = list_entry_rq(q->queue_head.next);
-
-       /*
-        * if this is a barrier write and the device has to issue a
-        * flush sequence to support it, check how far we are
-        */
-       if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
-               BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
+       while (1) {
+               while (!list_empty(&q->queue_head)) {
+                       rq = list_entry_rq(q->queue_head.next);
+                       if (blk_do_ordered(q, &rq))
+                               return rq;
+               }
 
-               if (q->ordered == QUEUE_ORDERED_FLUSH &&
-                   !blk_barrier_preflush(rq))
-                       rq = blk_start_pre_flush(q, rq);
+               if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+                       return NULL;
        }
-
-       return rq;
 }
 
 struct request *elv_next_request(request_queue_t *q)
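
__elv_next_request() no longer special-cases barrier writes itself. It now loops: as long as queue_head has requests it lets blk_do_ordered() decide whether the head request may be issued (it can also substitute a flush request or hold things back), and only when queue_head runs dry does it ask the elevator's dispatch_fn for more work, returning NULL once both are empty. A simplified userspace sketch of that two-level loop; dispatch() is a hypothetical stand-in for elevator_dispatch_fn(), the blk_do_ordered() gating is omitted, and unlike the kernel this sketch pops the request instead of just peeking at it:

    #include <stdio.h>

    static int fifo[16];                    /* models q->queue_head */
    static int nfifo;
    static int pending[16] = { 7, 8, 9 };   /* still inside the elevator */
    static int npending = 3;

    /* stand-in for elevator_dispatch_fn(): move one request to the FIFO,
     * return 0 when the elevator has nothing left */
    static int dispatch(void)
    {
            if (!npending)
                    return 0;
            fifo[nfifo++] = pending[--npending];
            return 1;
    }

    static int next_request(int *rq)
    {
            for (;;) {
                    if (nfifo) {                    /* queue_head not empty */
                            int i;

                            *rq = fifo[0];
                            for (i = 1; i < nfifo; i++)
                                    fifo[i - 1] = fifo[i];
                            nfifo--;
                            return 1;
                    }
                    if (!dispatch())                /* elevator empty too */
                            return 0;
            }
    }

    int main(void)
    {
            int rq;

            while (next_request(&rq))
                    printf("issue %d\n", rq);
            return 0;
    }
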
@@ -498,7 +512,7 @@ struct request *elv_next_request(request_queue_t *q)
                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
-                       end_that_request_last(rq);
+                       end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
@@ -597,6 +611,20 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }
+
+       /*
+        * Check if the queue is waiting for fs requests to be
+        * drained for flush sequence.
+        */
+       if (unlikely(q->ordseq)) {
+               struct request *first_rq = list_entry_rq(q->queue_head.next);
+               if (q->in_flight == 0 &&
+                   blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
+                   blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+                       blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
+                       q->request_fn(q);
+               }
+       }
 }
 
 int elv_register_queue(struct request_queue *q)
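
elv_completed_request() additionally watches for an ordered flush sequence that is parked in its DRAIN phase: once the last in-flight request finishes and everything left on queue_head belongs to a later phase, it completes QUEUE_ORDSEQ_DRAIN and re-runs q->request_fn() so the sequence can proceed. The pattern is simply "run a deferred step when the in-flight count reaches zero"; a hypothetical stand-alone illustration:

    #include <stdio.h>

    static int in_flight = 3;
    static int drain_done;

    static void kick(void)
    {
            /* corresponds to blk_ordered_complete_seq() + q->request_fn() */
            printf("drain complete, start flush sequence\n");
    }

    static void complete_one(void)
    {
            if (--in_flight == 0 && !drain_done) {
                    drain_done = 1;
                    kick();
            }
    }

    int main(void)
    {
            complete_one();
            complete_one();
            complete_one();     /* only the last completion kicks the queue */
            return 0;
    }
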
@@ -631,8 +659,10 @@ int elv_register(struct elevator_type *e)
        spin_unlock_irq(&elv_list_lock);
 
        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
-       if (!strcmp(e->elevator_name, chosen_elevator))
-               printk(" (default)");
+       if (!strcmp(e->elevator_name, chosen_elevator) ||
+                       (!*chosen_elevator &&
+                        !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+                               printk(" (default)");
        printk("\n");
        return 0;
 }
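
Finally, elv_register() now tags a scheduler as "(default)" in two cases: it matches an explicit boot-time choice, or no choice was made and it matches CONFIG_DEFAULT_IOSCHED. A trivial sketch of that condition (is_default() is hypothetical, mirroring the test above):

    #include <stdio.h>
    #include <string.h>

    static int is_default(const char *name, const char *chosen,
                          const char *compiled_default)
    {
            return !strcmp(name, chosen) ||
                   (!*chosen && !strcmp(name, compiled_default));
    }

    int main(void)
    {
            printf("%d\n", is_default("cfq", "", "cfq"));              /* 1 */
            printf("%d\n", is_default("deadline", "", "cfq"));         /* 0 */
            printf("%d\n", is_default("deadline", "deadline", "cfq")); /* 1 */
            return 0;
    }
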