/* $Id: generic.c,v 1.17 2001/04/09 04:08:06 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>

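/* Drop whatever reference an old, about-to-be-overwritten PTE holds:
 * release the page cache reference for a present page (skipping invalid
 * and reserved pages), or free the swap entry otherwise.
 */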
static inline void forget_pte(pte_t page)
{
        if (pte_none(page))
                return;
        if (pte_present(page)) {
                struct page *ptpage = pte_page(page);
                if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
                        return;
                page_cache_release(ptpage);
                return;
        }
        swap_free(pte_to_swp_entry(page));
}

/* Remap IO memory, the same way as remap_page_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check the
 * mem_map table, as this is independent of normal memory.
 *
 * As a special hack, if the lowest bit of offset is set, the
 * side-effect bit will be turned off.  This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage;
                pte_t entry;
                unsigned long curend = address + PAGE_SIZE;

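                /* Default is a single 8K PTE.  Bit 0 of offset is the
                 * FFB/AFB side-effect flag, not part of the physical
                 * address, so it is masked out here.  When address,
                 * offset and the remaining size all line up for it, a
                 * 4MB, 512K or 64K TTE is used instead and curend is
                 * pushed out to cover the whole large page.
                 */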
                entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
                if (!(address & 0xffff)) {
                        if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
                                                  space);
                                curend = address + 0x400000;
                                offset += 0x400000;
                        } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
                                                  space);
                                curend = address + 0x80000;
                                offset += 0x80000;
                        } else if (!(offset & 0xfffe) && end >= address + 0x10000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
                                                  space);
                                curend = address + 0x10000;
                                offset += 0x10000;
                        } else
                                offset += PAGE_SIZE;
                } else
                        offset += PAGE_SIZE;

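                /* The FFB/AFB hack described in the comment above: bit 0
                 * of offset set means map without the side-effect (E) bit.
                 */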
                if (offset & 0x1UL)
                        pte_val(entry) &= ~(_PAGE_E);
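                /* Every 8K pte slot covering [address, curend) receives
                 * the same entry, so a TLB miss anywhere within a large
                 * mapping finds the large TTE.
                 */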
                do {
                        oldpage = *pte;
                        pte_clear(pte);
                        set_pte(pte, entry);
                        forget_pte(oldpage);
                        address += PAGE_SIZE;
                        pte++;
                } while (address < curend);
        } while (address < end);
}

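/* Middle level of the walk: allocate pte pages under one pmd and hand
 * each to io_remap_pte_range().  offset is pre-biased by -address so
 * that "address + offset" at each step yields the physical offset for
 * that chunk.
 */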
static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        offset -= address;
        do {
                pte_t * pte = pte_alloc(current->mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

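/* Top level: walk the pgd for [from, from + size), building page tables
 * as needed under mm->page_table_lock.  The caller's prot is replaced
 * with pg_iobits, the range is flushed from the caches before the
 * rewrite and from the TLB after it.  Returns 0 or -ENOMEM.
 */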
int io_remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = current->mm;

        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(mm, beg, end);

        spin_lock(&mm->page_table_lock);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(current->mm, dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        spin_unlock(&mm->page_table_lock);

        flush_tlb_range(current->mm, beg, end);
        return error;
}
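
/* Usage sketch (not from this file; the names vma, physbase and
 * bus_space are hypothetical): a sparc64 driver's mmap() method would
 * typically remap a device aperture with something like
 *
 *      error = io_remap_page_range(vma->vm_start, physbase,
 *                                  vma->vm_end - vma->vm_start,
 *                                  vma->vm_page_prot, bus_space);
 *
 * prot is overridden with pg_iobits internally; setting bit 0 of
 * physbase requests the no-side-effect mapping used for FFB/AFB.
 */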