[PATCH] powerpc: legacy_serial loop cleanup
[powerpc.git] / kernel / sched.c
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90..78acdef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -49,6 +49,7 @@
 #include <linux/syscalls.h>
 #include <linux/times.h>
 #include <linux/acct.h>
+#include <linux/kprobes.h>
 #include <asm/tlb.h>
 
 #include <asm/unistd.h>
@@ -237,6 +238,7 @@ struct runqueue {
 
 	task_t *migration_thread;
 	struct list_head migration_queue;
+	int cpu;
 #endif
 
 #ifdef CONFIG_SCHEDSTATS
@@ -706,12 +708,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 				p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
 						DEF_TIMESLICE);
 		} else {
-			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time.
-			 */
-			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
 			/*
 			 * Tasks waking from uninterruptible sleep are
 			 * limited in their sleep_avg rise as they
@@ -1551,8 +1547,14 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
 	finish_lock_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
-	if (unlikely(prev_task_flags & PF_DEAD))
+	if (unlikely(prev_task_flags & PF_DEAD)) {
+		/*
+		 * Remove function-return probe instances associated with this
+		 * task and put them back on the free list.
+		 */
+		kprobe_flush_task(prev);
 		put_task_struct(prev);
+	}
 }
 
 /**
@@ -1660,6 +1662,9 @@ unsigned long nr_iowait(void)
 /*
  * double_rq_lock - safely lock two runqueues
  *
+ * We must take them in cpu order to match code in
+ * dependent_sleeper and wake_dependent_sleeper.
+ *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
@@ -1671,7 +1676,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
 		spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
 	} else {
-		if (rq1 < rq2) {
+		if (rq1->cpu < rq2->cpu) {
 			spin_lock(&rq1->lock);
 			spin_lock(&rq2->lock);
 		} else {
@@ -1707,7 +1712,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
 	__acquires(this_rq->lock)
 {
 	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
+		if (busiest->cpu < this_rq->cpu) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			spin_lock(&this_rq->lock);
@@ -2875,7 +2880,7 @@ asmlinkage void __sched schedule(void)
 	 */
 	if (likely(!current->exit_state)) {
 		if (unlikely(in_atomic())) {
-			printk(KERN_ERR "scheduling while atomic: "
+			printk(KERN_ERR "BUG: scheduling while atomic: "
 				"%s/0x%08x/%d\n",
 				current->comm, preempt_count(), current->pid);
 			dump_stack();
@@ -6035,6 +6040,7 @@ void __init sched_init(void)
 		rq->push_cpu = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
+		rq->cpu = i;
 #endif
 		atomic_set(&rq->nr_iowait, 0);
@@ -6075,7 +6081,7 @@ void __might_sleep(char *file, int line)
 		if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 			return;
 		prev_jiffy = jiffies;
-		printk(KERN_ERR "Debug: sleeping function called from invalid"
+		printk(KERN_ERR "BUG: sleeping function called from invalid"
 				" context at %s:%d\n", file, line);
 		printk("in_atomic():%d, irqs_disabled():%d\n",
 				in_atomic(), irqs_disabled());
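
The cpu-ordered locking rule introduced above (the new rq->cpu field, and double_rq_lock()/double_lock_balance() comparing rq->cpu instead of runqueue addresses) can be illustrated with a minimal user-space sketch. This is not part of the patch: struct rq_demo and double_lock_demo() are hypothetical names, and pthread mutexes stand in for the runqueue spinlocks.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for a per-cpu runqueue: a lock plus the cpu index added by the patch. */
struct rq_demo {
	pthread_mutex_t lock;
	int cpu;
};

/*
 * Take both locks in ascending cpu order, as double_rq_lock() does after
 * this change, so two CPUs locking the same pair can never deadlock.
 */
static void double_lock_demo(struct rq_demo *a, struct rq_demo *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
	} else if (a->cpu < b->cpu) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void double_unlock_demo(struct rq_demo *a, struct rq_demo *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct rq_demo rq0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct rq_demo rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };

	/* Argument order does not matter; lock order is always rq0 then rq1. */
	double_lock_demo(&rq1, &rq0);
	printf("locked rq%d and rq%d in cpu order\n", rq0.cpu, rq1.cpu);
	double_unlock_demo(&rq1, &rq0);
	return 0;
}

Built with -pthread, both locks are taken in the same order regardless of the caller's argument order, which is the property the patch relies on.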