projects
/
powerpc.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
[ALSA] Fix invalid assignment of PCI revision
[powerpc.git]
/
block
/
cfq-iosched.c
diff --git
a/block/cfq-iosched.c
b/block/cfq-iosched.c
index
1411048
..
533a293
100644
(file)
--- a/
block/cfq-iosched.c
+++ b/
block/cfq-iosched.c
@@
-4,7
+4,7
@@
* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
- * Copyright (C) 2003 Jens Axboe <axboe@
suse.de
>
+ * Copyright (C) 2003 Jens Axboe <axboe@
kernel.dk
>
*/
#include <linux/module.h>
#include <linux/blkdev.h>
*/
#include <linux/module.h>
#include <linux/blkdev.h>
@@
-43,8
+43,8
@@
static int cfq_slice_idle = HZ / 125;
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
-static
kmem_cache_t
*cfq_pool;
-static
kmem_cache_t
*cfq_ioc_pool;
+static
struct kmem_cache
*cfq_pool;
+static
struct kmem_cache
*cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
@@
-141,6
+141,8
@@
struct cfq_queue {
int queued[2];
/* currently allocated requests */
int allocated[2];
int queued[2];
/* currently allocated requests */
int allocated[2];
+ /* pending metadata requests */
+ int meta_pending;
/* fifo list of requests in sort_list */
struct list_head fifo;
/* fifo list of requests in sort_list */
struct list_head fifo;
@@
-217,9
+219,12
@@
static int cfq_queue_empty(request_queue_t *q)
return !cfqd->busy_queues;
}
return !cfqd->busy_queues;
}
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw
, int is_sync
)
{
{
- if (rw == READ || rw == WRITE_SYNC)
+ /*
+ * Use the per-process queue, for read requests and synchronous writes
+ */
+ if (!(rw & REQ_RW) || is_sync)
return task->pid;
return CFQ_KEY_ASYNC;
return task->pid;
return CFQ_KEY_ASYNC;
@@
-248,6
+253,10
@@
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
+ if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+ return rq1;
+ else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+ return rq2;
s1 = rq1->sector;
s2 = rq2->sector;
s1 = rq1->sector;
s2 = rq2->sector;
@@
-450,6
+459,9
@@
static void cfq_add_rq_rb(struct request *rq)
*/
while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
cfq_dispatch_insert(cfqd->queue, __alias);
*/
while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
cfq_dispatch_insert(cfqd->queue, __alias);
+
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq);
}
static inline void
}
static inline void
@@
-464,7
+476,7
@@
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
struct task_struct *tsk = current;
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
struct task_struct *tsk = current;
- pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+ pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio)
, bio_sync(bio)
);
struct cfq_queue *cfqq;
cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
struct cfq_queue *cfqq;
cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@
-510,6
+522,11
@@
static void cfq_remove_request(struct request *rq)
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
+
+ if (rq_is_meta(rq)) {
+ WARN_ON(!cfqq->meta_pending);
+ cfqq->meta_pending--;
+ }
}
static int
}
static int
@@
-1204,11
+1221,12
@@
static inline void changed_ioprio(struct cfq_io_context *cic)
{
struct cfq_data *cfqd = cic->key;
struct cfq_queue *cfqq;
{
struct cfq_data *cfqd = cic->key;
struct cfq_queue *cfqq;
+ unsigned long flags;
if (unlikely(!cfqd))
return;
if (unlikely(!cfqd))
return;
- spin_lock
(cfqd->queue->queue_lock
);
+ spin_lock
_irqsave(cfqd->queue->queue_lock, flags
);
cfqq = cic->cfqq[ASYNC];
if (cfqq) {
cfqq = cic->cfqq[ASYNC];
if (cfqq) {
@@
-1225,7
+1243,7
@@
static inline void changed_ioprio(struct cfq_io_context *cic)
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
- spin_unlock
(cfqd->queue->queue_lock
);
+ spin_unlock
_irqrestore(cfqd->queue->queue_lock, flags
);
}
static void cfq_ioc_set_ioprio(struct io_context *ioc)
}
static void cfq_ioc_set_ioprio(struct io_context *ioc)
@@
-1351,6
+1369,7
@@
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
struct rb_node **p;
struct rb_node *parent;
struct cfq_io_context *__cic;
struct rb_node **p;
struct rb_node *parent;
struct cfq_io_context *__cic;
+ unsigned long flags;
void *k;
cic->ioc = ioc;
void *k;
cic->ioc = ioc;
@@
-1380,9
+1399,9
@@
restart:
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
- spin_lock_irq
(cfqd->queue->queue_lock
);
+ spin_lock_irq
save(cfqd->queue->queue_lock, flags
);
list_add(&cic->queue_list, &cfqd->cic_list);
list_add(&cic->queue_list, &cfqd->cic_list);
- spin_unlock_irq
(cfqd->queue->queue_lock
);
+ spin_unlock_irq
restore(cfqd->queue->queue_lock, flags
);
}
/*
}
/*
@@
-1448,8
+1467,7
@@
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
}
static void
}
static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
- struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
{
sector_t sdist;
u64 total;
@@
-1527,8
+1545,18
@@
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
*/
if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
return 0;
*/
if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
return 0;
+ /*
+ * if the new request is sync, but the currently running queue is
+ * not, let the sync request have priority.
+ */
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
+ /*
+ * So both queues are sync. Let the new request get disk time if
+ * it's a metadata request and the current queue is doing regular IO.
+ */
+ if (rq_is_meta(rq) && !cfqq->meta_pending)
+ return 1;
return 0;
}
return 0;
}
@@
-1564,6
+1592,9
@@
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
struct cfq_io_context *cic = RQ_CIC(rq);
{
struct cfq_io_context *cic = RQ_CIC(rq);
+ if (rq_is_meta(rq))
+ cfqq->meta_pending++;
+
/*
* check if this request is a better next-serve candidate)) {
*/
/*
* check if this request is a better next-serve candidate)) {
*/
@@
-1588,7
+1619,7
@@
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
cfq_update_io_thinktime(cfqd, cic);
}
cfq_update_io_thinktime(cfqd, cic);
- cfq_update_io_seektime(c
fqd, c
ic, rq);
+ cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
@@
-1626,9
+1657,6
@@
static void cfq_insert_request(request_queue_t *q, struct request *rq)
cfq_add_rq_rb(rq);
cfq_add_rq_rb(rq);
- if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq);
-
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
@@
-1723,6
+1751,9
@@
static int cfq_may_queue(request_queue_t *q, int rw)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
struct cfq_queue *cfqq;
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
struct cfq_queue *cfqq;
+ unsigned int key;
+
+ key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
/*
* don't force setup of a queue from here, as a call to may_queue
/*
* don't force setup of a queue from here, as a call to may_queue
@@
-1730,7
+1761,7
@@
static int cfq_may_queue(request_queue_t *q, int rw)
* so just lookup a possibly existing queue, or return 'may queue'
* if that fails
*/
* so just lookup a possibly existing queue, or return 'may queue'
* if that fails
*/
- cfqq = cfq_find_cfq_hash(cfqd,
cfq_queue_pid(tsk, rw)
, tsk->ioprio);
+ cfqq = cfq_find_cfq_hash(cfqd,
key
, tsk->ioprio);
if (cfqq) {
cfq_init_prio_data(cfqq);
cfq_prio_boost(cfqq);
if (cfqq) {
cfq_init_prio_data(cfqq);
cfq_prio_boost(cfqq);
@@
-1744,7
+1775,7
@@
static int cfq_may_queue(request_queue_t *q, int rw)
/*
* queue lock held here
*/
/*
* queue lock held here
*/
-static void cfq_put_request(
request_queue_t *q,
struct request *rq)
+static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@
-1773,10
+1804,10
@@
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
struct task_struct *tsk = current;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
struct task_struct *tsk = current;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
- pid_t key = cfq_queue_pid(tsk, rw);
+ const int is_sync = rq_is_sync(rq);
+ pid_t key = cfq_queue_pid(tsk, rw, is_sync);
struct cfq_queue *cfqq;
unsigned long flags;
struct cfq_queue *cfqq;
unsigned long flags;
- int is_sync = key != CFQ_KEY_ASYNC;
might_sleep_if(gfp_mask & __GFP_WAIT);
might_sleep_if(gfp_mask & __GFP_WAIT);
@@
-1815,9
+1846,11
@@
queue_fail:
return 1;
}
return 1;
}
-static void cfq_kick_queue(
void *data
)
+static void cfq_kick_queue(
struct work_struct *work
)
{
{
- request_queue_t *q = data;
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@
-1925,7
+1958,7
@@
static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q
, elevator_t *e
)
+static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
{
struct cfq_data *cfqd;
int i;
@@
-1961,7
+1994,7
@@
static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue
, q
);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@
-2136,7
+2169,7
@@
static int __init cfq_init(void)
static void __exit cfq_exit(void)
{
static void __exit cfq_exit(void)
{
- DECLARE_COMPLETION(all_gone);
+ DECLARE_COMPLETION
_ONSTACK
(all_gone);
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */