uml: speed page fault path
[powerpc.git] / arch / um / kernel / skas / tlb.c
index 304a5b0..c43901a 100644 (file)
@@ -32,8 +32,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                                  op->u.mmap.offset, finished, flush);
                        break;
                case MUNMAP:
-                       ret = unmap(&mmu->skas.id,
-                                   (void *) op->u.munmap.addr,
+                       ret = unmap(&mmu->skas.id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, flush);
                        break;
                case MPROTECT:
@@ -94,3 +93,66 @@ void force_flush_all_skas(void)
        unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
         fix_range(current->mm, 0, end, 1);
 }
+
+/*
+ * Flush the host mapping for a single page of @vma at @address.
+ *
+ * Walks the four-level page table for the address; a missing pgd/pud/pmd
+ * entry is treated as fatal (jumps to "kill").  Read/write/exec permission
+ * bits are derived from the pte, then any pending new-page or new-protection
+ * state is pushed to the host address space via map()/unmap()/protect() on
+ * the mm's skas id.  On success the pte is marked up to date; on any failure
+ * the current process is killed with SIGKILL.
+ *
+ * NOTE(review): no return value and no locking visible here — presumably the
+ * caller holds whatever page-table locks are required; confirm at call sites.
+ */
+void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       struct mm_struct *mm = vma->vm_mm;
+       void *flush = NULL;
+       int r, w, x, err = 0;
+       struct mm_id *mm_id;
+
+       /* Walk pgd -> pud -> pmd; any non-present level is unexpected for a
+        * page we are asked to flush, so bail out and kill the process. */
+       pgd = pgd_offset(vma->vm_mm, address);
+       if(!pgd_present(*pgd))
+               goto kill;
+
+       pud = pud_offset(pgd, address);
+       if(!pud_present(*pud))
+               goto kill;
+
+       pmd = pmd_offset(pud, address);
+       if(!pmd_present(*pmd))
+               goto kill;
+
+       pte = pte_offset_kernel(pmd, address);
+
+       /* Start from the pte's permission bits, then narrow them: a pte that
+        * is not young gets no access at all, and a young-but-clean pte loses
+        * write — presumably so the next access faults and lets the fault path
+        * set the accessed/dirty bits (TODO confirm against the fault handler).
+        */
+       r = pte_read(*pte);
+       w = pte_write(*pte);
+       x = pte_exec(*pte);
+       if (!pte_young(*pte)) {
+               r = 0;
+               w = 0;
+       } else if (!pte_dirty(*pte)) {
+               w = 0;
+       }
+
+       mm_id = &mm->context.skas.id;
+       /* New page: either map the backing physical page into the host
+        * (present pte — fd/offset come from phys_mapping()) or unmap the
+        * address (pte not present).  Otherwise, if only the protection
+        * changed, just update it.  The final argument pair (1, &flush)
+        * matches the do_ops()-style finished/flush convention used above. */
+       if(pte_newpage(*pte)){
+               if(pte_present(*pte)){
+                       unsigned long long offset;
+                       int fd;
+
+                       fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
+                       err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
+                                 offset, 1, &flush);
+               }
+               else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
+       }
+       else if(pte_newprot(*pte))
+               err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
+
+       if(err)
+               goto kill;
+
+       /* Host state now matches the pte; clear its new-page/new-prot state. */
+       *pte = pte_mkuptodate(*pte);
+
+       return;
+
+kill:
+       printk("Failed to flush page for address 0x%lx\n", address);
+       force_sig(SIGKILL, current);
+}
+