Merge remote branch 'linus/master'

diff --git a/kernel/sched.c b/kernel/sched.c
index 22712b2..3eedd52 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -465,7 +465,7 @@ struct rq {
        u64 clock, prev_clock_raw;
        s64 clock_max_delta;
 
-       unsigned int clock_warps, clock_overflows;
+       unsigned int clock_warps, clock_overflows, clock_underflows;
        u64 idle_clock;
        unsigned int clock_deep_idle_events;
        u64 tick_timestamp;
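
[ Note: the three clock_* counters are debug statistics for the
  per-runqueue clock. Reading guide, inferred from how the counters are
  used elsewhere in this file (the field names are real, the
  annotations are ours):

	clock_warps	 - raw sched_clock() jumped backwards; clamped
	clock_overflows	 - forward jump past tick_timestamp + TICK_NSEC;
			   clamped
	clock_underflows - new in this patch: rq->clock advanced by less
			   than TICK_NSEC between ticks and was pushed
			   forward in scheduler_tick(), see below ]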
@@ -858,7 +858,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
        struct rq *rq = cpu_rq(smp_processor_id());
        u64 now = sched_clock();
 
-       touch_softlockup_watchdog();
        rq->idle_clock += delta_ns;
        /*
         * Override the previous timestamp and ignore all
@@ -870,6 +869,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
        rq->prev_clock_raw = now;
        rq->clock += delta_ns;
        spin_unlock(&rq->lock);
+       touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
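
[ Note: touch_softlockup_watchdog() is moved to after the rq->lock
  critical section, so the watchdog timestamp is refreshed once the
  idle-wakeup clock fixup is complete and the lock is no longer held.
  For reference, the helper only resets this CPU's watchdog timestamp;
  a rough sketch of its body in this era, paraphrased from
  kernel/softlockup.c and not part of this patch:

	void touch_softlockup_watchdog(void)
	{
		__raw_get_cpu_var(touch_timestamp) = jiffies;
	}
]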
 
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
        rq->nr_running++;
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
        rq->nr_running--;
 }
@@ -1350,11 +1350,11 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-       if (p->state == TASK_UNINTERRUPTIBLE)
+       if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
 
        enqueue_task(rq, p, wakeup);
-       inc_nr_running(p, rq);
+       inc_nr_running(rq);
 }
 
 /*
@@ -1362,11 +1362,11 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-       if (p->state == TASK_UNINTERRUPTIBLE)
+       if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;
 
        dequeue_task(rq, p, sleep);
-       dec_nr_running(p, rq);
+       dec_nr_running(rq);
 }
 
 /**
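
[ Note: task_contributes_to_load() is new in this series. TASK_KILLABLE
  contains TASK_UNINTERRUPTIBLE, so killable sleepers keep counting
  towards the load average exactly as plain uninterruptible ones did.
  Sketch of the helper as introduced in include/linux/sched.h
  (paraphrased):

	#define task_contributes_to_load(task)	\
		((task->state & TASK_UNINTERRUPTIBLE) != 0)
]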
@@ -1893,14 +1893,13 @@ out:
        return success;
 }
 
-int fastcall wake_up_process(struct task_struct *p)
+int wake_up_process(struct task_struct *p)
 {
-       return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-                                TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+       return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(struct task_struct *p, unsigned int state)
+int wake_up_state(struct task_struct *p, unsigned int state)
 {
        return try_to_wake_up(p, state, 0);
 }
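
[ Note: TASK_ALL and TASK_NORMAL come from the task-state bitmask rework
  that accompanies TASK_KILLABLE. Sketch of the new masks from
  include/linux/sched.h (paraphrased; the stopped/traced states gain a
  TASK_WAKEKILL bit so that fatal signals can wake them):

	#define TASK_WAKEKILL		128
	#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
	#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
	#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
	#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
	#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

  so wake_up_process() still wakes interruptible, uninterruptible,
  stopped and traced tasks, just via one named mask. ]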
@@ -1987,7 +1986,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
        unsigned long flags;
        struct rq *rq;
@@ -2006,7 +2005,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                 * management (if any):
                 */
                p->sched_class->task_new(rq, p);
-               inc_nr_running(p, rq);
+               inc_nr_running(rq);
        }
        check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
@@ -3736,8 +3735,10 @@ void scheduler_tick(void)
        /*
         * Let rq->clock advance by at least TICK_NSEC:
         */
-       if (unlikely(rq->clock < next_tick))
+       if (unlikely(rq->clock < next_tick)) {
                rq->clock = next_tick;
+               rq->clock_underflows++;
+       }
        rq->tick_timestamp = rq->clock;
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
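
[ Note: clock_underflows counts ticks on which rq->clock had advanced by
  less than TICK_NSEC and had to be pushed forward. It mirrors the
  existing clamps in __update_rq_clock() earlier in this file, roughly
  (paraphrased, the real function also tracks clock_max_delta):

	if (unlikely(delta < 0)) {
		clock++;
		rq->clock_warps++;	/* raw clock went backwards */
	} else if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
		clock = rq->tick_timestamp + TICK_NSEC;
		rq->clock_overflows++;	/* too-large forward jump, clamped */
	} else {
		clock += delta;
	}
]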
@@ -3752,7 +3753,7 @@ void scheduler_tick(void)
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
-void fastcall add_preempt_count(int val)
+void add_preempt_count(int val)
 {
        /*
         * Underflow?
@@ -3768,7 +3769,7 @@ void fastcall add_preempt_count(int val)
 }
 EXPORT_SYMBOL(add_preempt_count);
 
-void fastcall sub_preempt_count(int val)
+void sub_preempt_count(int val)
 {
        /*
         * Underflow?
@@ -3955,10 +3956,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
        struct task_struct *task = current;
        int saved_lock_depth;
-#endif
+
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task. Just return..
@@ -3974,14 +3974,10 @@ asmlinkage void __sched preempt_schedule(void)
                 * clear ->lock_depth so that schedule() doesn't
                 * auto-release the semaphore:
                 */
-#ifdef CONFIG_PREEMPT_BKL
                saved_lock_depth = task->lock_depth;
                task->lock_depth = -1;
-#endif
                schedule();
-#ifdef CONFIG_PREEMPT_BKL
                task->lock_depth = saved_lock_depth;
-#endif
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -4002,10 +3998,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
        struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
        struct task_struct *task = current;
        int saved_lock_depth;
-#endif
+
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4012,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
                 * clear ->lock_depth so that schedule() doesn't
                 * auto-release the semaphore:
                 */
-#ifdef CONFIG_PREEMPT_BKL
                saved_lock_depth = task->lock_depth;
                task->lock_depth = -1;
-#endif
                local_irq_enable();
                schedule();
                local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
                task->lock_depth = saved_lock_depth;
-#endif
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -4076,7 +4067,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
  */
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
 {
        unsigned long flags;
@@ -4090,7 +4081,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
        __wake_up_common(q, mode, 1, 0, NULL);
 }
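
[ Note: the fastcall annotations being dropped throughout this patch are
  no-ops by now. On i386 the macro expanded roughly to (paraphrased from
  the x86 linkage header):

	#define fastcall	__attribute__((regparm(3)))

  but the kernel is already built with -mregparm=3 there, so every
  function gets the register-passing convention anyway; on all other
  architectures the macro was empty. Removing it is pure cleanup with no
  generated-code change. ]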
@@ -4108,7 +4099,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall
+void
 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
        unsigned long flags;
@@ -4132,8 +4123,7 @@ void complete(struct completion *x)
 
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
-       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-                        1, 0, NULL);
+       __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4144,8 +4134,7 @@ void complete_all(struct completion *x)
 
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done += UINT_MAX/2;
-       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-                        0, 0, NULL);
+       __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4159,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
-                       if (state == TASK_INTERRUPTIBLE &&
-                           signal_pending(current)) {
+                       if ((state == TASK_INTERRUPTIBLE &&
+                            signal_pending(current)) ||
+                           (state == TASK_KILLABLE &&
+                            fatal_signal_pending(current))) {
                                __remove_wait_queue(&x->wait, &wait);
                                return -ERESTARTSYS;
                        }
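
[ Note: TASK_KILLABLE sleepers must only be woken by fatal signals,
  hence the extra fatal_signal_pending() test. Sketch of the helpers
  this relies on, as added in include/linux/sched.h by the same series
  (paraphrased):

	static inline int __fatal_signal_pending(struct task_struct *p)
	{
		return sigismember(&p->pending.signal, SIGKILL);
	}

	static inline int fatal_signal_pending(struct task_struct *p)
	{
		return signal_pending(p) && __fatal_signal_pending(p);
	}
]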
@@ -4220,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+       if (t == -ERESTARTSYS)
+               return t;
+       return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
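
[ Note: a hypothetical caller sketch, not from this patch, showing the
  intended use: a task that must not be disturbed by ordinary signals
  but should still die promptly on SIGKILL:

	int err = wait_for_completion_killable(&done);
	if (err)
		return err;	/* -ERESTARTSYS: killed while waiting */
	/* completion arrived normally */
]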
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -4953,19 +4953,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+       int resched = need_resched() && system_state == SYSTEM_RUNNING;
        int ret = 0;
 
-       if (need_lockbreak(lock)) {
+       if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
-               cpu_relax();
-               ret = 1;
-               spin_lock(lock);
-       }
-       if (need_resched() && system_state == SYSTEM_RUNNING) {
-               spin_release(&lock->dep_map, 1, _THIS_IP_);
-               _raw_spin_unlock(lock);
-               preempt_enable_no_resched();
-               __cond_resched();
+               if (resched && need_resched())
+                       __cond_resched();
+               else
+                       cpu_relax();
                ret = 1;
                spin_lock(lock);
        }
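
[ Note: spin_needbreak() replaces need_lockbreak() and folds the "is
  somebody spinning on this lock" test into one helper. Sketch from
  include/linux/spinlock.h as introduced alongside this change
  (paraphrased):

	static inline int spin_needbreak(spinlock_t *lock)
	{
	#ifdef CONFIG_PREEMPT
		return spin_is_contended(lock);
	#else
		return 0;
	#endif
	}

  The rewrite also drops the open-coded unlock path: the contention and
  resched cases now share a single unlock/relock sequence. ]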
@@ -5169,8 +5165,7 @@ void sched_show_task(struct task_struct *p)
        printk(KERN_CONT "%5lu %5d %6d\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent));
 
-       if (state != TASK_RUNNING)
-               show_stack(p, NULL);
+       show_stack(p, NULL);
 }
 
 void show_state_filter(unsigned long state_filter)
@@ -5241,11 +5236,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
        task_thread_info(idle)->preempt_count = 0;
-#endif
+
        /*
         * The idle tasks have their own, simple scheduling class:
         */