cleanup: remove task_t and convert all of its uses to struct task_struct. I
introduced it for the scheduler ages ago, and it was a mistake.
The conversion was mostly scripted; the result was reviewed, and all
secondary whitespace and style impact (if any) was fixed up by hand.
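
For reference, a minimal before/after sketch of the conversion this patch
performs (illustrative only; the conversion script itself is not included
here):

	/* before: the scheduler-era typedef and a declaration that uses it */
	typedef struct task_struct task_t;
	extern void set_user_nice(task_t *p, long nice);

	/* after: the typedef is gone and the struct type is spelled out */
	extern void set_user_nice(struct task_struct *p, long nice);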
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
25 files changed:
-thread_saved_pc(task_t *t)
+thread_saved_pc(struct task_struct *t)
{
unsigned long base = (unsigned long)task_stack_page(t);
unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
-ia64_mca_modify_comm(const task_t *previous_current)
+ia64_mca_modify_comm(const struct task_struct *previous_current)
{
char *p, comm[sizeof(current->comm)];
if (previous_current->pid)
* that we can do backtrace on the MCA/INIT handler code itself.
*/
+static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
const struct switch_stack *sw,
struct ia64_sal_os_state *sos,
ia64_va va;
extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
const pal_min_state_area_t *ms = sos->pal_min_state;
- task_t *previous_current;
+ struct task_struct *previous_current;
struct pt_regs *old_regs;
struct switch_stack *old_sw;
unsigned size = sizeof(struct pt_regs) +
pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
&sos->proc_state_param;
int recover, cpu = smp_processor_id();
- task_t *previous_current;
+ struct task_struct *previous_current;
struct ia64_mca_notify_die nd =
{ .sos = sos, .monarch_cpu = &monarch_cpu };
{
static atomic_t slaves;
static atomic_t monarchs;
- task_t *previous_current;
+ struct task_struct *previous_current;
int cpu = smp_processor_id();
struct ia64_mca_notify_die nd =
{ .sos = sos, .monarch_cpu = &monarch_cpu };
extern void start_ap (void);
extern unsigned long ia64_iobase;
-task_t *task_for_booting_cpu;
+struct task_struct *task_for_booting_cpu;
#endif
FEXPORT(ret_from_fork)
- jal schedule_tail # a0 = task_t *prev
+ jal schedule_tail # a0 = struct task_struct *prev
FEXPORT(syscall_exit)
local_irq_disable # make sure need_resched and
* used in sys_sched_set/getaffinity() in kernel/sched.c, so
* cloned here.
*/
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
}
cpumask_t new_mask;
cpumask_t effective_mask;
int retval;
if (len < sizeof(new_mask))
return -EINVAL;
unsigned int real_len;
cpumask_t mask;
int retval;
real_len = sizeof(mask);
if (len < real_len)
panic("read failed in suspend_new_thread, err = %d", -err);
}
-void schedule_tail(task_t *prev);
+void schedule_tail(struct task_struct *prev);
static void new_thread_handler(int sig)
{
static int tiocsctty(struct tty_struct *tty, int arg)
{
if (current->signal->leader &&
(current->signal->session == tty->session))
*/
struct wake_task_node {
struct list_head llink;
+ struct task_struct *task;
wait_queue_head_t *wq;
};
{
int wake_nests = 0;
unsigned long flags;
- task_t *this_task = current;
+ struct task_struct *this_task = current;
struct list_head *lsthead = &psw->wake_task_list, *lnk;
struct wake_task_node *tncur;
struct wake_task_node tnode;
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
#endif /* !__ASSEMBLY */
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'.
*
- * `next' and `prev' should be task_t, but it isn't always defined
+ * `next' and `prev' should be struct task_struct, but it isn't always defined
*/
#define switch_to(prev, next, last) do { \
*/
#define switch_to(prev, next, last) do { \
+ struct task_struct *__last; \
register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
-typedef struct task_struct task_t;
extern void sched_init(void);
extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
extern cpumask_t nohz_cpu_mask;
wait_queue_head_t wait_chldexit; /* for wait4() */
/* current thread group signal load-balancing target: */
+ struct task_struct *curr_target;
/* shared signal handling: */
struct sigpending shared_pending;
((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
#define used_math() tsk_used_math(current)
#ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
if (!cpu_isset(0, new_mask))
return -EINVAL;
#endif
extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_idle_next(void);
#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
#endif
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
+struct task_struct *fork_idle(int);
extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p) do { } while (0)
#endif
/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p) (p == p->group_leader)
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
{
return list_entry(rcu_dereference(p->thread_group.next),
+ struct task_struct, thread_group);
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}
int ret = 0;
pid_t pid;
__u32 version;
+ struct task_struct *target;
struct __user_cap_data_struct data;
if (get_user(version, &header->version))
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
+ struct task_struct *g, *target;
int ret = -EPERM;
int found = 0;
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
+ struct task_struct *g, *target;
int ret = -EPERM;
int found = 0;
{
kernel_cap_t inheritable, permitted, effective;
__u32 version;
+ struct task_struct *target;
void release_task(struct task_struct * p)
{
+ struct task_struct *leader;
repeat:
atomic_dec(&p->user->processes);
write_lock_irq(&tasklist_lock);
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
-static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
struct task_struct *p;
int ret = 1;
-static inline void choose_new_parent(task_t *p, task_t *reaper)
+static inline void
+choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
/*
* Make sure we're not reparenting to ourselves and that
p->real_parent = reaper;
}
-static void reparent_thread(task_t *p, task_t *father, int traced)
+static void
+reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
/* We don't want people slaying init. */
if (p->exit_signal != -1)
* group, and if no such member exists, give it to
* the global child reaper process (ie "init")
*/
-static void forget_original_parent(struct task_struct * father,
- struct list_head *to_release)
+static void
+forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
struct task_struct *p, *reaper = father;
struct list_head *_p, *_n;
*/
list_for_each_safe(_p, _n, &father->children) {
int ptrace;
- p = list_entry(_p,struct task_struct,sibling);
+ p = list_entry(_p, struct task_struct, sibling);
list_add(&p->ptrace_list, to_release);
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
- p = list_entry(_p,struct task_struct,ptrace_list);
+ p = list_entry(_p, struct task_struct, ptrace_list);
choose_new_parent(p, reaper);
reparent_thread(p, father, 1);
}
list_for_each_safe(_p, _n, &ptrace_dead) {
list_del_init(_p);
- t = list_entry(_p,struct task_struct,ptrace_list);
+ t = list_entry(_p, struct task_struct, ptrace_list);
do_group_exit((error_code & 0xff) << 8);
}
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
if (pid > 0) {
if (p->pid != pid)
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
int why, int status,
struct siginfo __user *infop,
struct rusage __user *rusagep)
{
int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
put_task_struct(p);
if (!retval)
retval = put_user(SIGCHLD, &infop->si_signo);
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
- struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+ int noreap, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code;
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int ret;
list_for_each(_p,&tsk->children) {
- p = list_entry(_p,struct task_struct,sibling);
+ p = list_entry(_p, struct task_struct, sibling);
ret = eligible_child(pid, options, p);
if (!ret)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
-static task_t *copy_process(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size,
- int __user *parent_tidptr,
- int __user *child_tidptr,
- int pid)
+static struct task_struct *copy_process(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+ int __user *parent_tidptr,
+ int __user *child_tidptr,
+ int pid)
{
int retval;
struct task_struct *p = NULL;
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
+ struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
return HRTIMER_NORESTART;
}
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
sl->task = task;
-int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
struct pid_link *link;
struct pid *pid;
-void fastcall detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
struct pid_link *link;
struct pid *pid;
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
-task_t *find_task_by_pid_type(int type, int nr)
+struct task_struct *find_task_by_pid_type(int type, int nr)
{
return pid_task(find_pid(nr), type);
}
*
* Must be called with the tasklist lock write-held.
*/
-void __ptrace_link(task_t *child, task_t *new_parent)
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
BUG_ON(!list_empty(&child->ptrace_list));
if (child->parent == new_parent)
* TASK_TRACED, resume it now.
* Requires that irqs be disabled.
*/
-void ptrace_untrace(task_t *child)
+void ptrace_untrace(struct task_struct *child)
{
spin_lock(&child->sighand->siglock);
if (child->state == TASK_TRACED) {
*
* Must be called with the tasklist lock write-held.
*/
-void __ptrace_unlink(task_t *child)
+void __ptrace_unlink(struct task_struct *child)
{
BUG_ON(!child->ptrace);
-static void printk_task(task_t *p)
+static void printk_task(struct task_struct *p)
{
if (p)
printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
-void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
+void
+rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
};
static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
-static task_t *threads[MAX_RT_TEST_THREADS];
+static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
enum test_opcodes {
static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
{
struct test_thread_data *td;
+ struct task_struct *tsk;
int i;
td = container_of(dev, struct test_thread_data, sysdev);
* Decreases task's usage by one - may thus free the task.
* Returns 0 or -EDEADLK.
*/
-static int rt_mutex_adjust_prio_chain(task_t *task,
+static int rt_mutex_adjust_prio_chain(struct task_struct *task,
int deadlock_detect,
struct rt_mutex *orig_lock,
struct rt_mutex_waiter *orig_waiter,
spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
put_task_struct(task);
struct rt_mutex_waiter *waiter,
int detect_deadlock)
{
+ struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
- task_t *owner = rt_mutex_owner(lock);
- int boost = 0, res;
spin_lock_irqsave(&current->pi_lock, flags);
__rt_mutex_adjust_prio(current);
struct rt_mutex_waiter *waiter)
{
int first = (waiter == rt_mutex_top_waiter(lock));
- int boost = 0;
- task_t *owner = rt_mutex_owner(lock);
+ struct task_struct *owner = rt_mutex_owner(lock);
spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
-static inline unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(struct task_struct *p)
{
return static_prio_timeslice(p->static_prio);
}
unsigned long expired_timestamp;
unsigned long long timestamp_last_tick;
+ struct task_struct *curr, *idle;
struct mm_struct *prev_mm;
prio_array_t *active, *expired, arrays[2];
int best_expired_prio;
int active_balance;
int push_cpu;
- task_t *migration_thread;
+ struct task_struct *migration_thread;
struct list_head migration_queue;
#endif
#endif
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
{
return rq->curr == p;
}
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
return p->oncpu;
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
* __task_rq_lock - lock the runqueue a given task resides on.
* Must be called interrupts disabled.
*/
-static inline runqueue_t *__task_rq_lock(task_t *p)
+static inline runqueue_t *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
struct runqueue *rq;
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
-static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags)
__acquires(rq->lock)
{
struct runqueue *rq;
* long it was from the *first* time it was queued to the time that it
* finally hit a cpu.
*/
-static inline void sched_info_dequeued(task_t *t)
+static inline void sched_info_dequeued(struct task_struct *t)
{
t->sched_info.last_queued = 0;
}
* long it was waiting to run. We also note when it began so that we
* can keep stats on how long its timeslice is.
*/
-static void sched_info_arrive(task_t *t)
+static void sched_info_arrive(struct task_struct *t)
{
unsigned long now = jiffies, diff = 0;
struct runqueue *rq = task_rq(t);
* the timestamp if it is already not set. It's assumed that
* sched_info_dequeued() will clear that stamp when appropriate.
*/
-static inline void sched_info_queued(task_t *t)
+static inline void sched_info_queued(struct task_struct *t)
{
if (!t->sched_info.last_queued)
t->sched_info.last_queued = jiffies;
* Called when a process ceases being the active-running process, either
* voluntarily or involuntarily. Now we can calculate how long we ran.
*/
-static inline void sched_info_depart(task_t *t)
+static inline void sched_info_depart(struct task_struct *t)
{
struct runqueue *rq = task_rq(t);
unsigned long diff = jiffies - t->sched_info.last_arrival;
* their time slice. (This may also be called when switching to or from
* the idle task.) We are only called when prev != next.
*/
-static inline void sched_info_switch(task_t *prev, task_t *next)
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
struct runqueue *rq = task_rq(prev);
* Both properties are important to certain workloads.
*/
-static inline int __normal_prio(task_t *p)
+static inline int __normal_prio(struct task_struct *p)
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
-static void set_load_weight(task_t *p)
+static void set_load_weight(struct task_struct *p)
{
if (has_rt_policy(p)) {
#ifdef CONFIG_SMP
p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
}
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
{
rq->raw_weighted_load += p->load_weight;
}
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
{
rq->raw_weighted_load -= p->load_weight;
}
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running++;
inc_raw_weighted_load(rq, p);
}
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running--;
dec_raw_weighted_load(rq, p);
* setprio syscalls, and whenever the interactivity
* estimator recalculates.
*/
-static inline int normal_prio(task_t *p)
+static inline int normal_prio(struct task_struct *p)
* interactivity modifiers. Will be RT if the task got
* RT-boosted. If not then it returns p->normal_prio.
*/
-static int effective_prio(task_t *p)
+static int effective_prio(struct task_struct *p)
{
p->normal_prio = normal_prio(p);
/*
/*
* __activate_task - move a task to the runqueue.
*/
-static void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(struct task_struct *p, runqueue_t *rq)
{
prio_array_t *target = rq->active;
/*
* __activate_idle_task - move idle task to the _front_ of runqueue.
*/
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
inc_nr_running(p, rq);
* Recalculate p->normal_prio and p->prio after having slept,
* updating the sleep-average too:
*/
-static int recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(struct task_struct *p, unsigned long long now)
{
/* Caller must always ensure 'now >= p->timestamp' */
unsigned long sleep_time = now - p->timestamp;
* Update all the scheduling statistics stuff. (sleep average
* calculation, priority modifiers, etc.)
*/
-static void activate_task(task_t *p, runqueue_t *rq, int local)
+static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
{
unsigned long long now;
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
-static void resched_task(task_t *p)
+static void resched_task(struct task_struct *p)
smp_send_reschedule(cpu);
}
#else
-static inline void resched_task(task_t *p)
+static inline void resched_task(struct task_struct *p)
{
assert_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*/
-inline int task_curr(const task_t *p)
+inline int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
typedef struct {
struct list_head list;
+ struct task_struct *task;
int dest_cpu;
struct completion done;
* The task's runqueue lock must be held.
* Returns true if you have to wait for migration thread.
*/
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int
+migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)
{
runqueue_t *rq = task_rq(p);
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
-void wait_task_inactive(task_t *p)
+void wait_task_inactive(struct task_struct *p)
{
unsigned long flags;
runqueue_t *rq;
* to another CPU then no harm is done and the purpose has been
* achieved as well.
*/
-void kick_process(task_t *p)
+void kick_process(struct task_struct *p)
* Returns the CPU we should wake onto.
*/
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, task_t *p)
+static int wake_idle(int cpu, struct task_struct *p)
{
cpumask_t tmp;
struct sched_domain *sd;
-static inline int wake_idle(int cpu, task_t *p)
+static inline int wake_idle(int cpu, struct task_struct *p)
*
* returns failure only if the task is already active.
*/
-static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
int cpu, this_cpu, success = 0;
unsigned long flags;
-int fastcall wake_up_process(task_t *p)
+int fastcall wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wake_up_process);
-int fastcall wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*/
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(struct task_struct *p, int clone_flags)
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
unsigned long flags;
int this_cpu, cpu;
* artificially, because any timeslice recovered here
* was given away by the parent in the first place.)
*/
-void fastcall sched_exit(task_t *p)
+void fastcall sched_exit(struct task_struct *p)
{
unsigned long flags;
runqueue_t *rq;
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
-static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next)
{
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
-static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
-asmlinkage void schedule_tail(task_t *prev)
+asmlinkage void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
runqueue_t *rq = this_rq();
* context_switch - switch to the new MM and the new
* thread's register state.
*/
-static inline
-task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+static inline struct task_struct *
+context_switch(runqueue_t *rq, struct task_struct *prev,
+ struct task_struct *next)
{
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = prev->active_mm;
* allow dest_cpu, which will force the cpu onto dest_cpu. Then
* the cpu_allowed mask is restored.
*/
-static void sched_migrate_task(task_t *p, int dest_cpu)
+static void sched_migrate_task(struct task_struct *p, int dest_cpu)
{
migration_req_t req;
runqueue_t *rq;
if (migrate_task(p, dest_cpu, &req)) {
/* Need to wait for migration thread (might exit: take ref). */
struct task_struct *mt = rq->migration_thread;
get_task_struct(mt);
task_rq_unlock(rq, &flags);
wake_up_process(mt);
put_task_struct(mt);
wait_for_completion(&req.done);
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
*/
-static
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
- runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+static void pull_task(runqueue_t *src_rq, prio_array_t *src_array,
+ struct task_struct *p, runqueue_t *this_rq,
+ prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
dec_nr_running(p, src_rq);
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
static
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
+int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu,
struct sched_domain *sd, enum idle_type idle,
int *all_pinned)
{
best_prio_seen, skip_for_load;
prio_array_t *array, *dst_array;
struct list_head *head, *curr;
+ struct task_struct *tmp;
if (max_nr_move == 0 || max_load_move == 0)
goto out;
head = array->queue + idx;
curr = head->prev;
skip_queue:
- tmp = list_entry(curr, task_t, run_list);
+ tmp = list_entry(curr, struct task_struct, run_list);
* Bank in p->sched_time the ns elapsed since the last tick or switch.
*/
static inline void
-update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now)
+update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now)
{
p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
}
* Return current->sched_time plus any more ns on the sched_clock
* that have not yet been banked.
*/
-unsigned long long current_sched_time(const task_t *p)
+unsigned long long current_sched_time(const struct task_struct *p)
{
unsigned long long ns;
unsigned long flags;
void scheduler_tick(void)
{
unsigned long long now = sched_clock();
+ struct task_struct *p = current;
int cpu = smp_processor_id();
runqueue_t *rq = this_rq();
update_cpu_clock(p, rq, now);
* utilize, if another task runs on a sibling. This models the
* slowdown effect of other tasks running on siblings:
*/
-static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+static inline unsigned long
+smt_slice(struct task_struct *p, struct sched_domain *sd)
{
return p->time_slice * (100 - sd->per_cpu_gain) / 100;
}
* acquire their lock. As we only trylock the normal locking order does not
* need to be obeyed.
*/
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
+static int
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
{
struct sched_domain *tmp, *sd = NULL;
int ret = 0, i;
return 0;
for_each_cpu_mask(i, sd->span) {
+ struct task_struct *smt_curr;
if (i == this_cpu)
continue;
-dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
+dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)
*/
asmlinkage void __sched schedule(void)
{
+ struct task_struct *prev, *next;
struct list_head *queue;
unsigned long long now;
unsigned long run_time;
int cpu, idx, new_prio;
prio_array_t *array;
long *switch_count;
runqueue_t *rq;
idx = sched_find_first_bit(array->bitmap);
queue = array->queue + idx;
- next = list_entry(queue->next, task_t, run_list);
+ next = list_entry(queue->next, struct task_struct, run_list);
if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
unsigned long long delta = now - next->timestamp;
*
* Used by the rt_mutex code to implement priority inheritance logic.
*/
-void rt_mutex_setprio(task_t *p, int prio)
+void rt_mutex_setprio(struct task_struct *p, int prio)
{
unsigned long flags;
prio_array_t *array;
-void set_user_nice(task_t *p, long nice)
+void set_user_nice(struct task_struct *p, long nice)
{
int old_prio, delta;
unsigned long flags;
* @p: task
* @nice: nice value
*/
-int can_nice(const task_t *p, const int nice)
+int can_nice(const struct task_struct *p, const int nice)
{
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
* RT tasks are offset by -200. Normal tasks are centered
* around 0, value goes from -16 to +15.
*/
-int task_prio(const task_t *p)
+int task_prio(const struct task_struct *p)
{
return p->prio - MAX_RT_PRIO;
}
* task_nice - return the nice value of a given task.
* @p: the task in question.
*/
-int task_nice(const task_t *p)
+int task_nice(const struct task_struct *p)
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
*/
-task_t *idle_task(int cpu)
+struct task_struct *idle_task(int cpu)
{
return cpu_rq(cpu)->idle;
}
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*/
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
}
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
struct sched_param lparam;
struct task_struct *p;
if (!param || pid < 0)
return -EINVAL;
read_unlock_irq(&tasklist_lock);
retval = sched_setscheduler(p, policy, &lparam);
put_task_struct(p);
*/
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
if (pid < 0)
goto out_nounlock;
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
{
struct sched_param lp;
if (!param || pid < 0)
goto out_nounlock;
long sched_setaffinity(pid_t pid, cpumask_t new_mask)
{
- task_t *p;
- int retval;
+ struct task_struct *p;
+ int retval;
lock_cpu_hotplug();
read_lock(&tasklist_lock);
long sched_getaffinity(pid_t pid, cpumask_t *mask)
{
lock_cpu_hotplug();
read_lock(&tasklist_lock);
asmlinkage
long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
{
int retval = -EINVAL;
struct timespec t;
if (pid < 0)
goto out_nounlock;
return list_entry(p->sibling.next,struct task_struct,sibling);
}
-static void show_task(task_t *p)
+static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+
+static void show_task(struct task_struct *p)
- task_t *relative;
- unsigned state;
+ struct task_struct *relative;
- static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
printk("%-13.13s ", p->comm);
state = p->state ? __ffs(p->state) + 1 : 0;
+ struct task_struct *g, *p;
#if (BITS_PER_LONG == 32)
printk("\n"
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
-void __devinit init_idle(task_t *idle, int cpu)
+void __devinit init_idle(struct task_struct *idle, int cpu)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long flags;
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
-int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
unsigned long flags;
migration_req_t req;
-static void migrate_dead(unsigned int dead_cpu, task_t *p)
+static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
{
struct runqueue *rq = cpu_rq(dead_cpu);
struct list_head *list = &rq->arrays[arr].queue[i];
while (!list_empty(list))
- migrate_dead(dead_cpu,
- list_entry(list->next, task_t,
- run_list));
+ migrate_dead(dead_cpu, list_entry(list->next,
+ struct task_struct, run_list));
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
-task_t *curr_task(int cpu)
+struct task_struct *curr_task(int cpu)
{
return cpu_curr(cpu);
}
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
-void set_curr_task(int cpu, task_t *p)
+void set_curr_task(int cpu, struct task_struct *p)
static void process_timeout(unsigned long __data)
{
- wake_up_process((task_t *)__data);
+ wake_up_process((struct task_struct *)__data);
wait_queue_head_t work_done;
struct workqueue_struct *wq;
+ struct task_struct *thread;
int run_depth; /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
* CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
* we select a process with CAP_SYS_RAW_IO set).
*/
-static void __oom_kill_task(task_t *p, const char *message)
+static void __oom_kill_task(struct task_struct *p, const char *message)
{
if (p->pid == 1) {
WARN_ON(1);
-static int oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(struct task_struct *p, const char *message)
+ struct task_struct *g, *q;
*/
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
unsigned long points = 0;
if (printk_ratelimit()) {