Fix m32r __xchg
diff --git a/kernel/sched.c b/kernel/sched.c
index 629614a..9474b23 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -465,7 +465,7 @@ struct rq {
 	u64 clock, prev_clock_raw;
 	s64 clock_max_delta;
 
-	unsigned int clock_warps, clock_overflows;
+	unsigned int clock_warps, clock_overflows, clock_underflows;
 	u64 idle_clock;
 	unsigned int clock_deep_idle_events;
 	u64 tick_timestamp;
@@ -858,7 +858,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	struct rq *rq = cpu_rq(smp_processor_id());
 	u64 now = sched_clock();
 
-	touch_softlockup_watchdog();
 	rq->idle_clock += delta_ns;
 	/*
 	 * Override the previous timestamp and ignore all
@@ -870,6 +869,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	rq->prev_clock_raw = now;
 	rq->clock += delta_ns;
 	spin_unlock(&rq->lock);
+	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
 }
@@ -1350,11 +1350,11 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }
 
 /*
@@ -1362,11 +1362,11 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }
 
 /**
@@ -1895,8 +1895,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -2006,7 +2005,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
@@ -3736,8 +3735,10 @@ void scheduler_tick(void)
 	/*
 	 * Let rq->clock advance by at least TICK_NSEC:
 	 */
-	if (unlikely(rq->clock < next_tick))
+	if (unlikely(rq->clock < next_tick)) {
 		rq->clock = next_tick;
+		rq->clock_underflows++;
+	}
 	rq->tick_timestamp = rq->clock;
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
@@ -4122,8 +4123,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4134,8 +4134,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4149,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4210,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -4943,19 +4953,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+	int resched = need_resched() && system_state == SYSTEM_RUNNING;
 	int ret = 0;
 
-	if (need_lockbreak(lock)) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		cpu_relax();
-		ret = 1;
-		spin_lock(lock);
-	}
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
-		spin_release(&lock->dep_map, 1, _THIS_IP_);
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
+		if (resched && need_resched())
+			__cond_resched();
+		else
+			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
 	}
@@ -5159,8 +5165,7 @@ void sched_show_task(struct task_struct *p)
 	printk(KERN_CONT "%5lu %5d %6d\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent));
 
-	if (state != TASK_RUNNING)
-		show_stack(p, NULL);
+	show_stack(p, NULL);
 }
 
 void show_state_filter(unsigned long state_filter)
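
The hunks above add wait_for_completion_killable() and route it through the new TASK_KILLABLE state in do_wait_for_common(). A minimal caller sketch in kernel C, assuming a hypothetical my_dev structure whose io_done completion is signalled from an interrupt handler via complete(); none of this is part of the patch itself:

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical device context; io_done is signalled by the IRQ path. */
struct my_dev {
	struct completion io_done;
};

static int my_dev_wait_for_io(struct my_dev *dev)
{
	/*
	 * TASK_KILLABLE sleep: ordinary signals are ignored, but a fatal
	 * signal (e.g. SIGKILL) aborts the wait and the call returns
	 * -ERESTARTSYS, matching the new do_wait_for_common() check.
	 */
	int ret = wait_for_completion_killable(&dev->io_done);

	if (ret)
		return ret;	/* interrupted by a fatal signal */

	return 0;		/* complete(&dev->io_done) was called */
}

This mirrors how wait_for_completion_interruptible() is already used, except that only fatal signals can break the wait.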