2 * linux/kernel/ptrace.c
4 * (C) Copyright 1999 Linus Torvalds
6 * Common interfaces for "ptrace()" which we do not want
7 * to continually duplicate across every architecture.
10 #include <linux/sched.h>
11 #include <linux/errno.h>
13 #include <linux/highmem.h>
14 #include <linux/smp_lock.h>
16 #include <asm/pgtable.h>
17 #include <asm/uaccess.h>
20 * Check that we have indeed attached to the thing..
/*
 * ptrace_check_attach - verify that @child really is ptrace-attached to
 * the current task and (unless @kill) is stopped and off its CPU before
 * the tracer starts poking at it.
 * @child: the task being traced
 * @kill:  nonzero when the tracer is detaching/killing the child
 *
 * NOTE(review): this extract is missing lines (braces, return values,
 * locking, the loop head for the do/while below). Comments describe only
 * the checks visible here.
 */
22 int ptrace_check_attach(struct task_struct *child, int kill)
/* The child must actually have been attached with ptrace... */
24 if (!(child->ptrace & PT_PTRACED))
/* ...and the current task must be its ptrace parent. */
27 if (child->p_pptr != current)
/* Unless killing/detaching, the child must already be stopped. */
31 if (child->state != TASK_STOPPED)
34 /* Make sure the child gets off its CPU.. */
37 if (!task_has_cpu(child))
/* Re-check: the child may have changed state while we waited. */
41 if (child->state != TASK_STOPPED)
/* Busy-wait until the child is no longer running on any CPU. */
45 } while (task_has_cpu(child));
51 /* All systems go.. */
/*
 * ptrace_attach - attach the current task as tracer of @task.
 * @task: the task to start tracing
 *
 * Permission model visible here: the caller must match ALL of the
 * target's uid/euid/suid and gid/egid/sgid, and hold at least the
 * target's permitted capabilities — OR hold CAP_SYS_PTRACE.
 *
 * NOTE(review): this extract is missing lines (error returns,
 * task_lock/unlock, self-attach and init checks, the child-list
 * re-linking inside the tasklist_lock section). Comments describe
 * only the visible statements.
 */
55 int ptrace_attach(struct task_struct *task)
/* Credential check: every id must match unless CAP_SYS_PTRACE. */
64 if(((current->uid != task->euid) ||
65 (current->uid != task->suid) ||
66 (current->uid != task->uid) ||
67 (current->gid != task->egid) ||
68 (current->gid != task->sgid) ||
/* Tracer's permitted capability set must cover the target's. */
69 (!cap_issubset(task->cap_permitted, current->cap_permitted)) ||
70 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
/* Non-dumpable targets (e.g. setuid) need CAP_SYS_PTRACE too. */
73 if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
75 /* the same process cannot be attached many times */
76 if (task->ptrace & PT_PTRACED)
/* Mark the target as traced... */
80 task->ptrace |= PT_PTRACED;
/* ...and remember that the tracer was privileged at attach time. */
81 if (capable(CAP_SYS_PTRACE))
82 task->ptrace |= PT_PTRACE_CAP;
/* Re-parent the target to the tracer under the tasklist lock. */
85 write_lock_irq(&tasklist_lock);
86 if (task->p_pptr != current) {
88 task->p_pptr = current;
91 write_unlock_irq(&tasklist_lock);
/* Stop the target so the tracer finds it in TASK_STOPPED. */
93 send_sig(SIGSTOP, task, 1);
/*
 * ptrace_detach - detach from @child, optionally delivering a signal.
 * @child: the traced task to release
 * @data:  signal number to report to the child on resume (0 = none);
 *         rejected when greater than _NSIG
 *
 * NOTE(review): this extract is missing lines (the error return for the
 * range check, clearing of child->ptrace flags, and the child-list
 * re-linking under tasklist_lock). Comments describe only the visible
 * statements.
 */
101 int ptrace_detach(struct task_struct *child, unsigned int data)
/* Validate the signal number before using it as exit_code. */
103 if ((unsigned long) data > _NSIG)
106 /* Architecture-specific hardware disable .. */
107 ptrace_disable(child);
109 /* .. re-parent .. */
/* The child will see @data as its ptrace stop/continue signal. */
111 child->exit_code = data;
112 write_lock_irq(&tasklist_lock);
/* Restore the original parent saved at attach time. */
114 child->p_pptr = child->p_opptr;
116 write_unlock_irq(&tasklist_lock);
118 /* .. and wake it up. */
119 wake_up_process(child);
* Access another process' address space.
* Source/target buffer must be in kernel space.
* Do not walk the page tables directly; use get_user_pages to pin
* each page, so the mm's locking and COW rules are respected.
*
* @tsk:   target task (NOTE(review): only @tsk->mm appears to be used;
*         the pinning call below passes current — confirm intent)
* @addr:  address in the target's address space
* @buf:   kernel-space buffer to copy from/to
* @len:   number of bytes to transfer
* @write: nonzero to write into the target, zero to read from it
*
* Returns the number of bytes actually transferred.
*
* NOTE(review): this extract is missing lines (mm lookup under task_lock,
* the kmap/kunmap of the pinned page, the copy loop head, page release,
* mmput, and the old_buf declaration used by the return). Comments
* describe only the visible statements.
*/
129 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
131 struct mm_struct *mm;
132 struct vm_area_struct *vma;
136 /* Worry about races with exit() */
/* Hold a reference so the mm cannot vanish while we use it. */
140 atomic_inc(&mm->mm_users);
/* Reader's lock is enough: we only look up and pin pages. */
145 down_read(&mm->mmap_sem);
146 /* ignore errors, just check how much was successfully transferred */
148 int bytes, ret, offset;
/* Pin exactly one page of the target at @addr (force=1). */
151 ret = get_user_pages(current, mm, addr, 1,
152 write, 1, &page, &vma);
/* Clamp the chunk so it does not cross the page boundary. */
157 offset = addr & (PAGE_SIZE-1);
158 if (bytes > PAGE_SIZE-offset)
159 bytes = PAGE_SIZE-offset;
/* Keep caches coherent around the direct page access. */
161 flush_cache_page(vma, addr);
/* Write path: kernel buffer -> target page, then sync icache
   in case the tracer just planted a breakpoint. */
165 memcpy(maddr + offset, buf, bytes);
166 flush_page_to_ram(page);
167 flush_icache_user_range(vma, page, addr, len);
/* Read path: target page -> kernel buffer. */
169 memcpy(buf, maddr + offset, bytes);
170 flush_page_to_ram(page);
178 up_read(&mm->mmap_sem);
/* Bytes moved = how far buf advanced past its starting value. */
181 return buf - old_buf;
/*
 * ptrace_readdata - copy @len bytes from the traced task's memory at
 * @src into the user buffer @dst, chunking through a kernel bounce
 * buffer.
 *
 * NOTE(review): this extract is missing lines (the bounce buffer
 * declaration, the copy loop head, short-read handling and the return
 * value accounting). Comments describe only the visible statements.
 */
184 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len)
190 int this_len, retval;
/* Transfer at most one bounce-buffer's worth per iteration. */
192 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
/* write=0: read from the child's address space. */
193 retval = access_process_vm(tsk, src, buf, this_len, 0);
/* Hand the chunk (possibly short) up to the tracer's buffer. */
199 if (copy_to_user(dst, buf, retval))
/*
 * ptrace_writedata - copy @len bytes from the user buffer @src into the
 * traced task's memory at @dst, chunking through a kernel bounce buffer.
 * Mirror image of ptrace_readdata above.
 *
 * NOTE(review): this extract is missing lines (the bounce buffer
 * declaration, the copy loop head, error returns and the tail of the
 * function). Comments describe only the visible statements.
 */
209 int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len)
215 int this_len, retval;
/* Transfer at most one bounce-buffer's worth per iteration. */
217 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
/* Pull the chunk in from the tracer's user buffer first. */
218 if (copy_from_user(buf, src, this_len))
/* write=1: write into the child's address space. */
220 retval = access_process_vm(tsk, dst, buf, this_len, 1);