4 * Kernel scheduler and related syscalls
6 * Copyright (C) 1991, 1992 Linus Torvalds
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
12 * 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
16 * 'sched.c' is the main kernel file. It contains scheduling primitives
17 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
18 * call functions (type getpid()), which just extract a field from
22 #include <linux/config.h>
24 #include <linux/init.h>
25 #include <linux/smp_lock.h>
26 #include <linux/nmi.h>
27 #include <linux/interrupt.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/completion.h>
30 #include <linux/prefetch.h>
31 #include <linux/compiler.h>
33 #include <asm/uaccess.h>
34 #include <asm/mmu_context.h>
37 extern void timer_bh(void);
38 extern void tqueue_bh(void);
39 extern void immediate_bh(void);
45 unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
47 extern void mem_use(void);
52 * NOTE! The unix "nice" value influences how long a process
53 * gets. The nice value ranges from -20 to +19, where a -20
54 * is a "high-priority" task, and a "+10" is a low-priority task.
57 * We want the time-slice to be around 50ms, so this
58 * calculation depends on the value of HZ.
60 #if HZ < 200
61 #define TICK_SCALE(x) ((x) >> 2)
62 #elif HZ < 400
63 #define TICK_SCALE(x) ((x) >> 1)
64 #elif HZ < 800
65 #define TICK_SCALE(x) (x)
66 #elif HZ < 1600
67 #define TICK_SCALE(x) ((x) << 1)
68 #else
69 #define TICK_SCALE(x) ((x) << 2)
70 #endif
72 #define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1)
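/*
 * Worked example (a sketch assuming HZ == 100, so TICK_SCALE(x) is
 * ((x) >> 2)):
 *
 *	NICE_TO_TICKS(0)   == (20 >> 2) + 1 ==  6 ticks ~=  60ms
 *	NICE_TO_TICKS(-20) == (40 >> 2) + 1 == 11 ticks ~= 110ms
 *	NICE_TO_TICKS(19)  == ( 1 >> 2) + 1 ==  1 tick  ~=  10ms
 *
 * i.e. the default time slice comes out close to the 50ms target above,
 * and "nicer" (higher nice) tasks get shorter slices.
 */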
76 * Init task must be ok at boot for the ix86 as we will check its signals
77 * via the SMP irq return path.
80 struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
83 * The tasklist_lock protects the linked list of processes.
85 * The runqueue_lock locks the parts that actually access
86 * and change the run-queues, and have to be interrupt-safe.
88 * If both locks are to be concurrently held, the runqueue_lock
89 * nests inside the tasklist_lock.
91 * task->alloc_lock nests inside tasklist_lock.
93 spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
94 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
96 static LIST_HEAD(runqueue_head);
99 * We align per-CPU scheduling data on cacheline boundaries,
100 * to prevent cacheline ping-pong.
103 struct schedule_data {
104 struct task_struct * curr;
105 cycles_t last_schedule;
107 char __pad [SMP_CACHE_BYTES];
108 } aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
110 #define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
111 #define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
113 struct kernel_stat kstat;
114 extern struct task_struct *child_reaper;
118 #define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
119 #define can_schedule(p,cpu) \
120 ((p)->cpus_runnable & (p)->cpus_allowed & (1UL << cpu))
124 #define idle_task(cpu) (&init_task)
125 #define can_schedule(p,cpu) (1)
129 void scheduling_functions_start_here(void) { }
132 * This is the function that decides how desirable a process is..
133 * You can weigh different processes against each other depending
134 * on what CPU they've run on lately etc to try to handle cache
135 * and TLB miss penalties.
138 * -1000: never select this
139 * 0: out of time, recalculate counters (but it might still be
141 * +ve: "goodness" value (the larger, the better)
142 * +1000: realtime process, select this.
145 static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
150 * select the current process after every other
151 * runnable process, but before the idle thread.
152 * Also, don't trigger a counter recalculation.
155 if (p->policy & SCHED_YIELD)
159 * Non-RT process - normal case first.
161 if (p->policy == SCHED_OTHER) {
163 * Give the process a first-approximation goodness value
164 * according to the number of clock-ticks it has left.
166 * Don't do any other calculations if the time slice is
174 /* Give a largish advantage to the same processor... */
175 /* (this is equivalent to penalizing other processors) */
176 if (p->processor == this_cpu)
177 weight += PROC_CHANGE_PENALTY;
180 /* .. and a slight advantage to the current MM */
181 if (p->mm == this_mm || !p->mm)
183 weight += 20 - p->nice;
188 * Realtime process, select the first one on the
189 * runqueue (taking priorities within processes
192 weight = 1000 + p->rt_priority;
198 * the 'goodness value' of replacing a process on a given CPU.
199 * positive value means 'replace', zero or negative means 'don't'.
201 static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
203 return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
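/*
 * Worked example (a sketch assuming HZ == 100 and ignoring the CPU and
 * mm bonuses): a nice-0 SCHED_OTHER task with a full time slice scores
 * roughly
 *
 *	counter + 20 - nice == NICE_TO_TICKS(0) + 20 == 6 + 20 == 26
 *
 * while any runnable realtime task scores 1000 + p->rt_priority, so
 * preemption_goodness() always comes out positive in its favour
 * against a SCHED_OTHER task.
 */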
207 * This is ugly, but reschedule_idle() is very timing-critical.
208 * We are called with the runqueue spinlock held and we must
209 * not claim the tasklist_lock.
211 static FASTCALL(void reschedule_idle(struct task_struct * p));
213 static void reschedule_idle(struct task_struct * p)
216 int this_cpu = smp_processor_id();
217 struct task_struct *tsk, *target_tsk;
218 int cpu, best_cpu, i, max_prio;
219 cycles_t oldest_idle;
222 * shortcut if the woken up task's last CPU is
225 best_cpu = p->processor;
226 if (can_schedule(p, best_cpu)) {
227 tsk = idle_task(best_cpu);
228 if (cpu_curr(best_cpu) == tsk) {
232 * If need_resched == -1 then we can skip sending
233 * the IPI altogether, tsk->need_resched is
234 * actively watched by the idle thread.
236 need_resched = tsk->need_resched;
237 tsk->need_resched = 1;
238 if ((best_cpu != this_cpu) && !need_resched)
239 smp_send_reschedule(best_cpu);
245 * We know that the preferred CPU has a cache-affine current
246 * process, let's try to find a new idle CPU for the woken-up
247 * process. Select the least recently active idle CPU. (that
248 * one will have the least active cache context.) Also find
249 * the executing process which has the least priority.
251 oldest_idle = (cycles_t) -1;
255 for (i = 0; i < smp_num_cpus; i++) {
256 cpu = cpu_logical_map(i);
257 if (!can_schedule(p, cpu))
261 * We use the first available idle CPU. This creates
262 * a priority list between idle CPUs, but this is not
265 if (tsk == idle_task(cpu)) {
266 #if defined(__i386__) && defined(CONFIG_SMP)
268 * Check if two siblings are idle in the same
269 * physical package. Use them if found.
271 if (smp_num_siblings == 2) {
272 if (cpu_curr(cpu_sibling_map[cpu]) ==
273 idle_task(cpu_sibling_map[cpu])) {
274 oldest_idle = last_schedule(cpu);
281 if (last_schedule(cpu) < oldest_idle) {
282 oldest_idle = last_schedule(cpu);
286 if (oldest_idle == -1ULL) {
287 int prio = preemption_goodness(tsk, p, cpu);
289 if (prio > max_prio) {
298 if (oldest_idle != -1ULL) {
299 best_cpu = tsk->processor;
302 tsk->need_resched = 1;
303 if (tsk->processor != this_cpu)
304 smp_send_reschedule(tsk->processor);
310 int this_cpu = smp_processor_id();
311 struct task_struct *tsk;
313 tsk = cpu_curr(this_cpu);
314 if (preemption_goodness(tsk, p, this_cpu) > 0)
315 tsk->need_resched = 1;
322 * This has to add the process to the _end_ of the
323 * run-queue, not the beginning. The goodness value will
324 * determine whether this process will run next. This is
325 * important to get SCHED_FIFO and SCHED_RR right, where
326 * a process that is either pre-empted or its time slice
327 * has expired, should be moved to the tail of the run
328 * queue for its priority - Bhavesh Davda
330 static inline void add_to_runqueue(struct task_struct * p)
332 list_add_tail(&p->run_list, &runqueue_head);
336 static inline void move_last_runqueue(struct task_struct * p)
338 list_del(&p->run_list);
339 list_add_tail(&p->run_list, &runqueue_head);
343 * Wake up a process. Put it on the run-queue if it's not
344 * already there. The "current" process is always on the
345 * run-queue (except when the actual re-schedule is in
346 * progress), and as such you're allowed to do the simpler
347 * "current->state = TASK_RUNNING" to mark yourself runnable
348 * without the overhead of this.
350 static inline int try_to_wake_up(struct task_struct * p, int synchronous)
356 * We want the common case to fall straight through, hence the goto.
358 spin_lock_irqsave(&runqueue_lock, flags);
359 p->state = TASK_RUNNING;
360 if (task_on_runqueue(p))
363 if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id())))
367 spin_unlock_irqrestore(&runqueue_lock, flags);
371 inline int wake_up_process(struct task_struct * p)
373 return try_to_wake_up(p, 0);
376 static void process_timeout(unsigned long __data)
378 struct task_struct * p = (struct task_struct *) __data;
384 * schedule_timeout - sleep until timeout
385 * @timeout: timeout value in jiffies
387 * Make the current task sleep until @timeout jiffies have
388 * elapsed. The routine will return immediately unless
389 * the current task state has been set (see set_current_state()).
391 * You can set the task state as follows -
393 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
394 * pass before the routine returns. The routine will return 0
396 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
397 * delivered to the current task. In this case the remaining time
398 * in jiffies will be returned, or 0 if the timer expired in time
400 * The current task state is guaranteed to be TASK_RUNNING when this routine returns.
403 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
404 * the CPU away without a bound on the timeout. In this case the return
405 * value will be %MAX_SCHEDULE_TIMEOUT.
407 * In all cases the return value is guaranteed to be non-negative.
409 signed long schedule_timeout(signed long timeout)
411 struct timer_list timer;
412 unsigned long expire;
416 case MAX_SCHEDULE_TIMEOUT:
418 * These two special cases are useful for the caller's
419 * convenience. Nothing more. We could take
420 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
421 * but I'd like to return a valid offset (>=0) to allow
422 * the caller to do whatever it wants with the retval.
428 * Another bit of paranoia. Note that the retval will be
429 * 0, since no part of the kernel is supposed to check
430 * for a negative retval of schedule_timeout() (it
431 * should never happen anyway). You just get the printk()
432 * below telling you that something has gone wrong, and where.
436 printk(KERN_ERR "schedule_timeout: wrong timeout "
437 "value %lx from %p\n", timeout,
438 __builtin_return_address(0));
439 current->state = TASK_RUNNING;
444 expire = timeout + jiffies;
447 timer.expires = expire;
448 timer.data = (unsigned long) current;
449 timer.function = process_timeout;
453 del_timer_sync(&timer);
455 timeout = expire - jiffies;
458 return timeout < 0 ? 0 : timeout;
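/*
 * Usage sketch (an illustration, not a caller in this file; the local
 * variable 'remaining' is hypothetical): a driver that wants to sleep
 * for about a second, but wake early on a signal, would typically do
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	if (remaining)
 *		a signal woke us with 'remaining' jiffies still to go
 */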
462 * schedule_tail() is getting called from the fork return path. This
463 * cleans up all remaining scheduler things, without impacting the
466 static inline void __schedule_tail(struct task_struct *prev)
472 * prev->policy can be written from here only before `prev'
473 * can be scheduled (before setting prev->cpus_runnable to ~0UL).
474 * Of course it must also be read before allowing prev
475 * to be rescheduled, but since the write depends on the read
476 * to complete, wmb() is enough. (the spin_lock() acquired
477 * before setting cpus_runnable is not enough because spin_lock()'s
478 * acquire semantics allow code from outside the critical section
479 * to be reordered into it.)
481 policy = prev->policy;
482 prev->policy = policy & ~SCHED_YIELD;
486 * fast path falls through. We have to clear cpus_runnable before
487 * checking prev->state to avoid a wakeup race. Protect against
488 * the task exiting early.
491 task_release_cpu(prev);
493 if (prev->state == TASK_RUNNING)
497 task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */
501 * Slow path - we 'push' the previous process and
502 * reschedule_idle() will attempt to find a new
503 * processor for it. (but it might preempt the
504 * current process as well.) We must take the runqueue
505 * lock and re-check prev->state to be correct. It might
506 * still happen that this process has a preemption
507 * 'in progress' already - but this is not a problem and
508 * might happen in other circumstances as well.
515 * Avoid taking the runqueue lock in cases where
516 * no preemption-check is necessary:
518 if ((prev == idle_task(smp_processor_id())) ||
519 (policy & SCHED_YIELD))
522 spin_lock_irqsave(&runqueue_lock, flags);
523 if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
524 reschedule_idle(prev);
525 spin_unlock_irqrestore(&runqueue_lock, flags);
529 prev->policy &= ~SCHED_YIELD;
530 #endif /* CONFIG_SMP */
533 asmlinkage void schedule_tail(struct task_struct *prev)
535 __schedule_tail(prev);
539 * 'schedule()' is the scheduler function. It's a very simple and nice
540 * scheduler: it's not perfect, but certainly works for most things.
542 * The goto is "interesting".
544 * NOTE!! Task 0 is the 'idle' task, which gets called when no other
545 * tasks can run. It cannot be killed, and it cannot sleep. The 'state'
546 * information in task[0] is never used.
548 asmlinkage void schedule(void)
550 struct schedule_data * sched_data;
551 struct task_struct *prev, *next, *p;
552 struct list_head *tmp;
556 spin_lock_prefetch(&runqueue_lock);
558 BUG_ON(!current->active_mm);
561 this_cpu = prev->processor;
563 if (unlikely(in_interrupt())) {
564 printk("Scheduling in interrupt\n");
568 release_kernel_lock(prev, this_cpu);
571 * 'sched_data' is protected by the fact that we can run
572 * only one process per CPU.
574 sched_data = & aligned_data[this_cpu].schedule_data;
576 spin_lock_irq(&runqueue_lock);
578 /* move an exhausted RR process to be last.. */
579 if (unlikely(prev->policy == SCHED_RR))
580 if (!prev->counter) {
581 prev->counter = NICE_TO_TICKS(prev->nice);
582 move_last_runqueue(prev);
585 switch (prev->state) {
586 case TASK_INTERRUPTIBLE:
587 if (signal_pending(prev)) {
588 prev->state = TASK_RUNNING;
592 del_from_runqueue(prev);
595 prev->need_resched = 0;
598 * this is the scheduler proper:
603 * Default process to select..
605 next = idle_task(this_cpu);
607 list_for_each(tmp, &runqueue_head) {
608 p = list_entry(tmp, struct task_struct, run_list);
609 if (can_schedule(p, this_cpu)) {
610 int weight = goodness(p, this_cpu, prev->active_mm);
612 c = weight, next = p;
616 /* Do we need to re-calculate counters? */
618 struct task_struct *p;
620 spin_unlock_irq(&runqueue_lock);
621 read_lock(&tasklist_lock);
623 p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
624 read_unlock(&tasklist_lock);
625 spin_lock_irq(&runqueue_lock);
626 goto repeat_schedule;
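/*
 * Worked example of the recharge rule above: for a nice-0 task
 * (NICE_TO_TICKS == 6 at HZ == 100) repeated recalculations give
 *
 *	0 -> 6 -> 9 -> 10 -> 11 -> 11 ...
 *
 * i.e. the counter converges just below 2 * NICE_TO_TICKS, so tasks
 * that have been sleeping accumulate at most about twice the normal
 * time slice.
 */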
630 * from this point on nothing can prevent us from
631 * switching to the next task, save this fact in
634 sched_data->curr = next;
635 task_set_cpu(next, this_cpu);
636 spin_unlock_irq(&runqueue_lock);
638 if (unlikely(prev == next)) {
639 /* We won't go through the normal tail, so do this by hand */
640 prev->policy &= ~SCHED_YIELD;
646 * maintain the per-process 'last schedule' value.
647 * (this has to be recalculated even if we reschedule to
648 * the same process) Currently this is only used on SMP,
649 * and it's approximate, so we do not have to maintain
650 * it while holding the runqueue spinlock.
652 sched_data->last_schedule = get_cycles();
655 * We drop the scheduler lock early (it's a global spinlock),
656 * thus we have to lock the previous process from getting
657 * rescheduled during switch_to().
660 #endif /* CONFIG_SMP */
662 kstat.context_swtch++;
664 * there are 3 processes which are affected by a context switch:
666 * prev == .... ==> (last => next)
668 * It's the 'much more previous' 'prev' that is on next's stack,
669 * but prev is set to (the just run) 'last' process by switch_to().
670 * This might sound slightly confusing but makes tons of sense.
674 struct mm_struct *mm = next->mm;
675 struct mm_struct *oldmm = prev->active_mm;
677 BUG_ON(next->active_mm);
678 next->active_mm = oldmm;
679 atomic_inc(&oldmm->mm_count);
680 enter_lazy_tlb(oldmm, next, this_cpu);
682 BUG_ON(next->active_mm != mm);
683 switch_mm(oldmm, mm, next, this_cpu);
687 prev->active_mm = NULL;
693 * This just switches the register state and the
696 switch_to(prev, next, prev);
697 __schedule_tail(prev);
700 reacquire_kernel_lock(current);
701 if (current->need_resched)
702 goto need_resched_back;
707 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
708 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
709 * non-exclusive tasks and one exclusive task.
711 * There are circumstances in which we can try to wake a task which has already
712 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero
713 * in this (rare) case, and we handle it by continuing to scan the queue.
715 static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
716 int nr_exclusive, const int sync)
718 struct list_head *tmp;
719 struct task_struct *p;
721 CHECK_MAGIC_WQHEAD(q);
722 WQ_CHECK_LIST_HEAD(&q->task_list);
724 list_for_each(tmp,&q->task_list) {
726 wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
728 CHECK_MAGIC(curr->__magic);
733 if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
739 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
743 wq_read_lock_irqsave(&q->lock, flags);
744 __wake_up_common(q, mode, nr, 0);
745 wq_read_unlock_irqrestore(&q->lock, flags);
749 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
753 wq_read_lock_irqsave(&q->lock, flags);
754 __wake_up_common(q, mode, nr, 1);
755 wq_read_unlock_irqrestore(&q->lock, flags);
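/*
 * Illustration (a sketch in terms of the wake_up()/wake_up_all()
 * wrappers in <linux/sched.h>): wake_up(&q) ends up here with nr == 1,
 * so it wakes every non-exclusive waiter plus at most one waiter that
 * queued itself with add_wait_queue_exclusive(); wake_up_all(&q)
 * passes nr == 0 and wakes everybody. This is what lets e.g.
 * accept()-style wakeups avoid the thundering-herd problem.
 */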
759 void complete(struct completion *x)
763 spin_lock_irqsave(&x->wait.lock, flags);
765 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
766 spin_unlock_irqrestore(&x->wait.lock, flags);
769 void wait_for_completion(struct completion *x)
771 spin_lock_irq(&x->wait.lock);
773 DECLARE_WAITQUEUE(wait, current);
775 wait.flags |= WQ_FLAG_EXCLUSIVE;
776 __add_wait_queue_tail(&x->wait, &wait);
778 __set_current_state(TASK_UNINTERRUPTIBLE);
779 spin_unlock_irq(&x->wait.lock);
781 spin_lock_irq(&x->wait.lock);
783 __remove_wait_queue(&x->wait, &wait);
786 spin_unlock_irq(&x->wait.lock);
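/*
 * Usage sketch (the name 'setup_done' is hypothetical; the API itself
 * comes from <linux/completion.h>):
 *
 *	static DECLARE_COMPLETION(setup_done);
 *
 *	waiter:		wait_for_completion(&setup_done);
 *	completer:	complete(&setup_done);
 *
 * Unlike a bare wait queue, the completion's 'done' count remembers a
 * complete() that happened before the waiter went to sleep, so there
 * is no lost-wakeup window.
 */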
789 #define SLEEP_ON_VAR \
790 unsigned long flags; \
792 init_waitqueue_entry(&wait, current);
794 #define SLEEP_ON_HEAD \
795 wq_write_lock_irqsave(&q->lock,flags); \
796 __add_wait_queue(q, &wait); \
797 wq_write_unlock(&q->lock);
799 #define SLEEP_ON_TAIL \
800 wq_write_lock_irq(&q->lock); \
801 __remove_wait_queue(q, &wait); \
802 wq_write_unlock_irqrestore(&q->lock,flags);
804 void interruptible_sleep_on(wait_queue_head_t *q)
808 current->state = TASK_INTERRUPTIBLE;
815 long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
819 current->state = TASK_INTERRUPTIBLE;
822 timeout = schedule_timeout(timeout);
828 void sleep_on(wait_queue_head_t *q)
832 current->state = TASK_UNINTERRUPTIBLE;
839 long sleep_on_timeout(wait_queue_head_t *q, long timeout)
843 current->state = TASK_UNINTERRUPTIBLE;
846 timeout = schedule_timeout(timeout);
852 void scheduling_functions_end_here(void) { }
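/*
 * Usage sketch for the sleep_on() family above (the queue 'my_queue'
 * and the flag 'data_ready' are hypothetical):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_queue);
 *
 *	reader:		while (!data_ready)
 *				interruptible_sleep_on(&my_queue);
 *	writer/irq:	data_ready = 1;
 *			wake_up_interruptible(&my_queue);
 *
 * Note the well-known race: the condition can change between the test
 * and the sleep, which is why wait_event_interruptible() is usually
 * preferred over sleep_on().
 */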
857 * This has been replaced by sys_setpriority. Maybe it should be
858 * moved into the arch dependent tree for those ports that require
859 * it for backward compatibility?
862 asmlinkage long sys_nice(int increment)
867 * Setpriority might change our priority at the same moment.
868 * We don't have to worry. Conceptually one call occurs first
869 * and we have a single winner.
872 if (!capable(CAP_SYS_NICE))
880 newprio = current->nice + increment;
885 current->nice = newprio;
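/*
 * User-space view (a sketch of the libc wrapper, not code from this
 * file): nice(5) asks the kernel for current->nice + 5; a negative
 * increment (raising priority) requires CAP_SYS_NICE, as checked above.
 *
 *	errno = 0;
 *	if (nice(5) == -1 && errno != 0)
 *		perror("nice");
 */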
891 static inline struct task_struct *find_process_by_pid(pid_t pid)
893 struct task_struct *tsk = current;
896 tsk = find_task_by_pid(pid);
900 static int setscheduler(pid_t pid, int policy,
901 struct sched_param *param)
903 struct sched_param lp;
904 struct task_struct *p;
908 if (!param || pid < 0)
912 if (copy_from_user(&lp, param, sizeof(struct sched_param)))
916 * We play safe to avoid deadlocks.
918 read_lock_irq(&tasklist_lock);
919 spin_lock(&runqueue_lock);
921 p = find_process_by_pid(pid);
931 if (policy != SCHED_FIFO && policy != SCHED_RR &&
932 policy != SCHED_OTHER)
937 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
938 * priority for SCHED_OTHER is 0.
941 if (lp.sched_priority < 0 || lp.sched_priority > 99)
943 if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
947 if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
948 !capable(CAP_SYS_NICE))
950 if ((current->euid != p->euid) && (current->euid != p->uid) &&
951 !capable(CAP_SYS_NICE))
956 p->rt_priority = lp.sched_priority;
958 current->need_resched = 1;
961 spin_unlock(&runqueue_lock);
962 read_unlock_irq(&tasklist_lock);
968 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
969 struct sched_param *param)
971 return setscheduler(pid, policy, param);
974 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
976 return setscheduler(pid, -1, param);
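/*
 * User-space view (a sketch of the POSIX wrappers for these syscalls,
 * not code from this file): making the calling process SCHED_FIFO at
 * priority 10 looks like
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0)
 *		perror("sched_setscheduler");
 *
 * and requires CAP_SYS_NICE (in practice, root), as enforced above.
 */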
979 asmlinkage long sys_sched_getscheduler(pid_t pid)
981 struct task_struct *p;
989 read_lock(&tasklist_lock);
990 p = find_process_by_pid(pid);
992 retval = p->policy & ~SCHED_YIELD;
993 read_unlock(&tasklist_lock);
999 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
1001 struct task_struct *p;
1002 struct sched_param lp;
1006 if (!param || pid < 0)
1009 read_lock(&tasklist_lock);
1010 p = find_process_by_pid(pid);
1014 lp.sched_priority = p->rt_priority;
1015 read_unlock(&tasklist_lock);
1018 * This one might sleep, we cannot do it with a spinlock held ...
1020 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
1026 read_unlock(&tasklist_lock);
1030 asmlinkage long sys_sched_yield(void)
1033 * Trick. sched_yield() first counts the number of truly
1034 * 'pending' runnable processes, then returns if it's
1035 * only the current process. (This test does not have
1036 * to be atomic.) In threaded applications this optimization
1037 * gets triggered quite often.
1040 int nr_pending = nr_running;
1045 // Subtract non-idle processes running on other CPUs.
1046 for (i = 0; i < smp_num_cpus; i++) {
1047 int cpu = cpu_logical_map(i);
1048 if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
1052 // on UP this process is on the runqueue as well
1057 * This process can only be rescheduled by us,
1058 * so this is safe without any locking.
1060 if (current->policy == SCHED_OTHER)
1061 current->policy |= SCHED_YIELD;
1062 current->need_resched = 1;
1064 spin_lock_irq(&runqueue_lock);
1065 move_last_runqueue(current);
1066 spin_unlock_irq(&runqueue_lock);
1072 * yield - yield the current processor to other threads.
1074 * this is a shortcut for kernel-space yielding - it marks the
1075 * thread runnable and calls sys_sched_yield().
1079 set_current_state(TASK_RUNNING);
1084 void __cond_resched(void)
1086 set_current_state(TASK_RUNNING);
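/*
 * Usage sketch (the helper 'process_one_object()' is hypothetical):
 * long-running kernel loops stay responsive by yielding whenever a
 * reschedule is pending, e.g.
 *
 *	for (i = 0; i < nr_objects; i++) {
 *		process_one_object(i);
 *		if (current->need_resched)
 *			__cond_resched();
 *	}
 */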
1090 asmlinkage long sys_sched_get_priority_max(int policy)
1106 asmlinkage long sys_sched_get_priority_min(int policy)
1121 asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1124 struct task_struct *p;
1125 int retval = -EINVAL;
1131 read_lock(&tasklist_lock);
1132 p = find_process_by_pid(pid);
1134 jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice),
1136 read_unlock(&tasklist_lock);
1138 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
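/*
 * User-space view (a sketch of the POSIX wrapper, not code from this
 * file): querying the round-robin slice of the calling process:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("slice: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 *
 * For a SCHED_RR task this reports NICE_TO_TICKS(nice) converted to a
 * timespec; for SCHED_FIFO it reports 0, as computed above.
 */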
1143 static void show_task(struct task_struct * p)
1145 unsigned long free = 0;
1147 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
1149 printk("%-13.13s ", p->comm);
1150 state = p->state ? ffz(~p->state) + 1 : 0;
1151 if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
1152 printk(stat_nam[state]);
1155 #if (BITS_PER_LONG == 32)
1157 printk(" current ");
1159 printk(" %08lX ", thread_saved_pc(&p->thread));
1162 printk(" current task ");
1164 printk(" %016lx ", thread_saved_pc(&p->thread));
1167 unsigned long * n = (unsigned long *) (p+1);
1170 free = (unsigned long) n - (unsigned long)(p+1);
1172 printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
1174 printk("%5d ", p->p_cptr->pid);
1178 printk("%7d", p->p_ysptr->pid);
1182 printk(" %5d", p->p_osptr->pid);
1186 printk(" (L-TLB)\n");
1188 printk(" (NOTLB)\n");
1191 extern void show_trace_task(struct task_struct *tsk);
1196 char * render_sigset_t(sigset_t *set, char *buffer)
1201 if (sigismember(set, i+1)) x |= 1;
1202 if (sigismember(set, i+2)) x |= 2;
1203 if (sigismember(set, i+3)) x |= 4;
1204 if (sigismember(set, i+4)) x |= 8;
1205 *buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
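/*
 * Worked example (assuming _NSIG == 64): the set is rendered as 16 hex
 * digits, highest-numbered signals first, four signals per digit. A set
 * containing only SIGKILL (signal 9) therefore renders as
 *
 *	"0000000000000100"
 *
 * because signal 9 sets bit 0 of the digit covering signals 9-12.
 */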
1211 void show_state(void)
1213 struct task_struct *p;
1215 #if (BITS_PER_LONG == 32)
1218 printk(" task PC stack pid father child younger older\n");
1222 printk(" task PC stack pid father child younger older\n");
1224 read_lock(&tasklist_lock);
1227 * reset the NMI-timeout, listing all files on a slow
1228 * console might take a lot of time:
1230 touch_nmi_watchdog();
1233 read_unlock(&tasklist_lock);
1237 * reparent_to_init() - Reparent the calling kernel thread to the init task.
1239 * If a kernel thread is launched as a result of a system call, or if
1240 * it ever exits, it should generally reparent itself to init so that
1241 * it is correctly cleaned up on exit.
1243 * The various task state fields, such as scheduling policy and priority, may have
1244 * been inherited from a user process, so we reset them to sane values here.
1246 * NOTE that reparent_to_init() gives the caller full capabilities.
1248 void reparent_to_init(void)
1250 struct task_struct *this_task = current;
1252 write_lock_irq(&tasklist_lock);
1254 /* Reparent to init */
1255 REMOVE_LINKS(this_task);
1256 this_task->p_pptr = child_reaper;
1257 this_task->p_opptr = child_reaper;
1258 SET_LINKS(this_task);
1260 /* Set the exit signal to SIGCHLD so we signal init on exit */
1261 this_task->exit_signal = SIGCHLD;
1263 /* We also take the runqueue_lock while altering task fields
1264 * which affect scheduling decisions */
1265 spin_lock(&runqueue_lock);
1267 this_task->ptrace = 0;
1268 this_task->nice = DEF_NICE;
1269 this_task->policy = SCHED_OTHER;
1273 this_task->cap_effective = CAP_INIT_EFF_SET;
1274 this_task->cap_inheritable = CAP_INIT_INH_SET;
1275 this_task->cap_permitted = CAP_FULL_SET;
1276 this_task->keep_capabilities = 0;
1277 memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim)));
1278 this_task->user = INIT_USER;
1280 spin_unlock(&runqueue_lock);
1281 write_unlock_irq(&tasklist_lock);
1285 * Put all the gunge required to become a kernel thread without
1286 * attached user resources in one place where it belongs.
1289 void daemonize(void)
1291 struct fs_struct *fs;
1295 * If we were started as result of loading a module, close all of the
1296 * user space pages. We don't need them, and if we didn't close them
1297 * they would be locked into memory.
1301 current->session = 1;
1303 current->tty = NULL;
1305 /* Become as one with the init task */
1307 exit_fs(current); /* current->fs->count--; */
1310 atomic_inc(&fs->count);
1311 exit_files(current);
1312 current->files = init_task.files;
1313 atomic_inc(&current->files->count);
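/*
 * Usage sketch (the thread function and its name are hypothetical): a
 * 2.4-style kernel thread typically detaches itself near the top of
 * its main function:
 *
 *	static int my_worker(void *unused)
 *	{
 *		daemonize();
 *		reparent_to_init();
 *		strcpy(current->comm, "my_worker");
 *		... main loop ...
 *	}
 *
 *	kernel_thread(my_worker, NULL, 0);
 */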
1316 extern unsigned long wait_init_idle;
1318 void __init init_idle(void)
1320 struct schedule_data * sched_data;
1321 sched_data = &aligned_data[smp_processor_id()].schedule_data;
1323 if (current != &init_task && task_on_runqueue(current)) {
1324 printk("UGH! (%d:%d) was on the runqueue, removing.\n",
1325 smp_processor_id(), current->pid);
1326 del_from_runqueue(current);
1328 sched_data->curr = current;
1329 sched_data->last_schedule = get_cycles();
1330 clear_bit(current->processor, &wait_init_idle);
1333 extern void init_timervecs (void);
1335 void __init sched_init(void)
1338 * We have to do a little magic to get the first
1339 * process right in SMP mode.
1341 int cpu = smp_processor_id();
1346 printk("Enter sched_init\n");
1347 if (ppc_md.progress) ppc_md.progress("enter: sched_init",0x100);
1349 init_task.processor = cpu;
1351 for(nr = 0; nr < PIDHASH_SZ; nr++)
1356 printk("Enter init_timervecs\n");
1357 if (ppc_md.progress) ppc_md.progress("enter: init_timervecs",0x101);
1361 init_bh(TIMER_BH, timer_bh);
1362 init_bh(TQUEUE_BH, tqueue_bh);
1363 init_bh(IMMEDIATE_BH, immediate_bh);
1366 * The boot idle thread does lazy MMU switching as well:
1368 atomic_inc(&init_mm.mm_count);
1371 printk("Enter lazy MMU\n");
1372 if (ppc_md.progress) ppc_md.progress("enter: lazy_mmu",0x102);
1374 enter_lazy_tlb(&init_mm, current, cpu);