/*
 *  linux/arch/cris/mm/fault.c
 *
 *  Low level bus fault handler
 *
 *  Copyright (C) 2000, 2001 Axis Communications AB
 */
14 #include <asm/uaccess.h>
15 #include <asm/pgtable.h>
16 #include <asm/arch/svinto.h>
/* debug of low-level TLB reload */
27 extern volatile pgd_t *current_pgd;
29 extern const struct exception_table_entry
30 *search_exception_tables(unsigned long addr);
32 asmlinkage void do_page_fault(unsigned long address, struct pt_regs *regs,
33 int protection, int writeaccess);
/* fast TLB-fill fault handler
 * this is called from entry.S with interrupts disabled
 */
/*
 * handle_mmu_bus_fault() - fast TLB-refill handler for an MMU bus fault.
 *
 * Decodes the fault cause from R_MMU_CAUSE / R_TLB_SELECT. For a plain
 * TLB miss it walks the kernel page tables (via current_pgd) and writes
 * the pte straight into the chosen TLB slot (R_TLB_LO); any other case
 * (missing pte, access/protection violation) is handed to do_page_fault().
 *
 * NOTE(review): this chunk is a gutted extraction — the return-type line,
 * local declarations (cause/select/index/page_id/acc/inv, pmd, pte), the
 * "cause = *R_MMU_CAUSE" read, several if()/brace lines and most of the
 * D()-debug printk bodies are on lines missing from this view, and each
 * remaining line carries a fused original-file line number. Documented
 * as-is; do not treat this text as compilable.
 */
40 handle_mmu_bus_fault(struct pt_regs *regs)
/* miss/we/writeac are decoded below from the miss/we-exception and
 * read-vs-write bits of R_MMU_CAUSE */
49 int miss, we, writeac;
52 unsigned long address;
/* "cause" holds *R_MMU_CAUSE, read on a line missing from this chunk;
 * the upper bits of the cause register carry the faulting page address */
56 address = cause & PAGE_MASK; /* get faulting address */
59 select = *R_TLB_SELECT;
/* page_id/acc/inv/index appear only in the D() debug printk below */
60 page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
61 acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
62 inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
63 index = IO_EXTRACT(R_TLB_SELECT, index, select);
65 miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
66 we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause);
67 writeac = IO_EXTRACT(R_MMU_CAUSE, wr_rd, cause);
69 /* ETRAX 100LX TR89 bugfix: if the second half of an unaligned
70 * write causes a MMU-fault, it will not be restarted correctly.
71 * This could happen if a write crosses a page-boundary and the
72 * second page is not yet COW'ed or even loaded. The workaround
73 * is to clear the unaligned bit in the CPU status record, so
74 * that the CPU will rerun both the first and second halves of
75 * the instruction. This will not have any sideeffects unless
76 * the first half goes to any device or memory that can't be
77 * written twice, and which is mapped through the MMU.
79 * We only need to do this for writes.
 */
/* clear the "unaligned" flag (bit 5) in the saved CPU status record;
 * presumably guarded by a write-access check ("if (writeac)") on a line
 * missing from this chunk — see the TR89 comment above */
83 regs->csrinstr &= ~(1 << 5);
85 D(printk("bus_fault from IRP 0x%lx: addr 0x%lx, miss %d, inv %d, we %d, acc %d, dx %d pid %d\n",
86 regs->irp, address, miss, inv, we, acc, index, page_id));
88 /* for a miss, we need to reload the TLB entry */
91 /* see if the pte exists at all — refer through current_pgd, don't use
 * mm->pgd (the active page directory may not belong to current's mm) —
 * NOTE(review): rationale hedged; only the current_pgd instruction is
 * visible here */
/* two-level walk: CRIS folds the pmd into the pgd entry here */
95 pmd = (pmd_t *)(current_pgd + pgd_index(address));
/* pgd/pmd-none path (guard on a missing line): no mapping yet, let the
 * generic handler fault the page in; protection=0 marks it as a miss */
97 do_page_fault(address, regs, 0, writeac);
/* pmd-bad path (guard on a missing line): corrupt page-table entry */
101 printk("bad pgdir entry 0x%lx at 0x%p\n", *(unsigned long*)pmd, pmd);
105 pte = *pte_offset_kernel(pmd, address);
106 if (!pte_present(pte)) {
107 do_page_fault(address, regs, 0, writeac);
/* D()-debug dump of the pte and its flag bits; the printk bodies for most
 * of the branches below are on lines missing from this chunk */
112 printk(" found pte %lx pg %p ", pte_val(pte), pte_page(pte));
113 if (pte_val(pte) & _PAGE_SILENT_WRITE)
115 if (pte_val(pte) & _PAGE_KERNEL)
117 if (pte_val(pte) & _PAGE_SILENT_READ)
119 if (pte_val(pte) & _PAGE_GLOBAL)
121 if (pte_val(pte) & _PAGE_PRESENT)
123 if (pte_val(pte) & _PAGE_ACCESSED)
125 if (pte_val(pte) & _PAGE_MODIFIED)
127 if (pte_val(pte) & _PAGE_READ)
129 if (pte_val(pte) & _PAGE_WRITE)
130 printk("Writeable ");
134 /* load up the chosen TLB entry
135 * this assumes the pte format is the same as the TLB_LO layout.
137 * the write to R_TLB_LO also writes the vpn and page_id fields from
138 * R_MMU_CAUSE, which we in this case obviously want to keep
 */
141 *R_TLB_LO = pte_val(pte);
/* not a miss — an access/protection violation (we/acc/inv):
 * leave it to the MM system fault handler (protection=1, writeaccess=we;
 * argument names per the do_page_fault prototype above) */
146 /* leave it to the MM system fault handler */
147 do_page_fault(address, regs, 1, we);
/* Called from arch/cris/mm/fault.c to find fixup code. */
/*
 * find_fixup_code() - look up an exception-table fixup entry for the
 * faulting instruction (regs->irp) and, if one exists, redirect the
 * trap return to the fixup address.
 *
 * NOTE(review): truncated in this chunk — the return-type line, braces
 * and the function tail (including its return value) are on lines past
 * the end of this view. Documented as-is.
 */
152 find_fixup_code(struct pt_regs *regs)
154 const struct exception_table_entry *fixup;
/* regs->irp is the saved instruction pointer of the faulting insn */
156 if ((fixup = search_exception_tables(regs->irp)) != 0) {
157 /* Adjust the instruction pointer in the stackframe. */
158 regs->irp = fixup->fixup;
/* Don't return by restoring the full CPU state — switch the frame type
 * so the trap exit path takes the normal-frame return (comment fragment;
 * its opening lines are missing from this chunk) */
164 regs->frametype = CRIS_FRAME_NORMAL;