uml: more page fault path trimming
arch/um/kernel/skas/tlb.c
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/mmu.h"
#include "mem_user.h"
#include "mem.h"
#include "skas.h"
#include "os.h"
#include "tlb.h"

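/*
 * Apply a batch of queued host VM operations to the host address space
 * backing this mm.  Each entry is translated into the corresponding
 * map/unmap/protect call; iteration stops at the first failure.
 */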
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                  int finished, void **flush)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for(i = 0; i <= last && !ret; i++){
                op = &ops[i];
                switch(op->type){
                case MMAP:
                        ret = map(&mmu->skas.id, op->u.mmap.addr,
                                  op->u.mmap.len, op->u.mmap.prot,
                                  op->u.mmap.fd, op->u.mmap.offset, finished,
                                  flush);
                        break;
                case MUNMAP:
                        ret = unmap(&mmu->skas.id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, flush);
                        break;
                case MPROTECT:
                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
                                      op->u.mprotect.len, op->u.mprotect.prot,
                                      finished, flush);
                        break;
                default:
                        printk("Unknown op type %d in do_ops\n", op->type);
                        break;
                }
        }

        return ret;
}

extern int proc_mm;

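/*
 * Flush a range of the address space out to the host.  Without /proc/mm
 * support, addresses at or above CONFIG_STUB_START hold the stub pages and
 * must stay mapped, so the range is clamped there before fix_range_common
 * does the real work.
 */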
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        if(!proc_mm && (end_addr > CONFIG_STUB_START))
                end_addr = CONFIG_STUB_START;

        fix_range_common(mm, start_addr, end_addr, force, do_ops);
}

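/* Flush a single page of the kernel address space. */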
void __flush_tlb_one_skas(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

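/*
 * Kernel mappings (vma->vm_mm == NULL) go through the common kernel range
 * flush; userspace ranges are pushed out to the host via fix_range().
 */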
void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        if(vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);
}

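/* Flush the entire userspace portion of an address space out to the host. */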
void flush_tlb_mm_skas(struct mm_struct *mm)
{
        unsigned long end;

        /* Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if(atomic_read(&mm->mm_users) == 0)
                return;

        end = proc_mm ? task_size : CONFIG_STUB_START;
        fix_range(mm, 0, end, 0);
}

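/* Force every mapping in the current address space to be redone on the host. */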
void force_flush_all_skas(void)
{
        unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
        fix_range(current->mm, 0, end, 1);
}

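/*
 * Single-page fault path: walk the page tables for the faulting address,
 * derive host protections from the pte (dropping read and write access for
 * pages that are not young, and write access for pages that are not dirty),
 * and issue exactly one map, unmap, or protect call on the host.  Any
 * failure means the host mappings can no longer be trusted, so the process
 * is killed.
 */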
void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = vma->vm_mm;
        void *flush = NULL;
        int r, w, x, prot, err = 0;
        struct mm_id *mm_id;

        pgd = pgd_offset(mm, address);
        if(!pgd_present(*pgd))
                goto kill;

        pud = pud_offset(pgd, address);
        if(!pud_present(*pud))
                goto kill;

        pmd = pmd_offset(pud, address);
        if(!pmd_present(*pmd))
                goto kill;

        pte = pte_offset_kernel(pmd, address);

        r = pte_read(*pte);
        w = pte_write(*pte);
        x = pte_exec(*pte);
        if (!pte_young(*pte)) {
                r = 0;
                w = 0;
        } else if (!pte_dirty(*pte)) {
                w = 0;
        }

        mm_id = &mm->context.skas.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if(pte_newpage(*pte)){
                if(pte_present(*pte)){
                        unsigned long long offset;
                        int fd;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                }
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        }
        else if(pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        if(err)
                goto kill;

        *pte = pte_mkuptodate(*pte);

        return;

kill:
        printk("Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL, current);
}