/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
20 * Check that we have indeed attached to the thing..
22 int ptrace_check_attach(struct task_struct *child, int kill)
25 if (!(child->ptrace & PT_PTRACED))
28 if (child->p_pptr != current)
32 if (child->state != TASK_STOPPED)
35 /* Make sure the child gets off its CPU.. */
38 if (!task_has_cpu(child))
42 if (child->state != TASK_STOPPED)
46 } while (task_has_cpu(child));
52 /* All systems go.. */
56 int ptrace_attach(struct task_struct *task)
61 if (task->tgid == current->tgid)
65 if(((current->uid != task->euid) ||
66 (current->uid != task->suid) ||
67 (current->uid != task->uid) ||
68 (current->gid != task->egid) ||
69 (current->gid != task->sgid) ||
70 (!cap_issubset(task->cap_permitted, current->cap_permitted)) ||
71 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
74 if (!is_dumpable(task) && !capable(CAP_SYS_PTRACE))
76 /* the same process cannot be attached many times */
77 if (task->ptrace & PT_PTRACED)
81 task->ptrace |= PT_PTRACED;
82 if (capable(CAP_SYS_PTRACE))
83 task->ptrace |= PT_PTRACE_CAP;
86 write_lock_irq(&tasklist_lock);
87 if (task->p_pptr != current) {
89 task->p_pptr = current;
92 write_unlock_irq(&tasklist_lock);
94 send_sig(SIGSTOP, task, 1);
102 int ptrace_detach(struct task_struct *child, unsigned int data)
104 if ((unsigned long) data > _NSIG)
107 /* Architecture-specific hardware disable .. */
108 ptrace_disable(child);
110 /* .. re-parent .. */
112 child->exit_code = data;
113 write_lock_irq(&tasklist_lock);
115 child->p_pptr = child->p_opptr;
117 write_unlock_irq(&tasklist_lock);
119 /* .. and wake it up. */
120 wake_up_process(child);
125 * Access another process' address space.
126 * Source/target buffer must be kernel space,
127 * Do not walk the page table directly, use get_user_pages
130 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
132 struct mm_struct *mm;
133 struct vm_area_struct *vma;
137 /* Worry about races with exit() */
141 atomic_inc(&mm->mm_users);
146 down_read(&mm->mmap_sem);
147 /* ignore errors, just check how much was sucessfully transfered */
149 int bytes, ret, offset;
152 ret = get_user_pages(current, mm, addr, 1,
153 write, 1, &page, &vma);
158 offset = addr & (PAGE_SIZE-1);
159 if (bytes > PAGE_SIZE-offset)
160 bytes = PAGE_SIZE-offset;
162 flush_cache_page(vma, addr);
166 memcpy(maddr + offset, buf, bytes);
167 flush_page_to_ram(page);
168 flush_icache_user_range(vma, page, addr, len);
169 set_page_dirty(page);
171 memcpy(buf, maddr + offset, bytes);
172 flush_page_to_ram(page);
180 up_read(&mm->mmap_sem);
183 return buf - old_buf;
186 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len)
192 int this_len, retval;
194 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
195 retval = access_process_vm(tsk, src, buf, this_len, 0);
201 if (copy_to_user(dst, buf, retval))
/*
 * Write @len bytes from the userspace buffer @src into @tsk's address
 * space at @dst, chunked through an on-stack bounce buffer.
 *
 * NOTE(review): this listing is truncated here — the tail of the
 * function (loop bookkeeping and the return path after the visible
 * access_process_vm() call) is not part of this view; presumably it
 * mirrors ptrace_readdata's copied/-EIO handling — TODO confirm
 * against the full source.
 */
211 int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len)
217 int this_len, retval;
/* Cap each chunk at the bounce-buffer size. */
219 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
220 if (copy_from_user(buf, src, this_len))
/* Direction flag 1 = write into the traced task's memory. */
222 retval = access_process_vm(tsk, dst, buf, this_len, 1);