/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

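/*
 * Fill one PTE table with mappings for [address, address + size),
 * clamped at the PMD boundary.  Every entry is made present, writable,
 * global, dirty and accessed, with the caller's extra flags OR'ed in;
 * finding an already-populated PTE is treated as a bug.
 */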
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

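/*
 * Walk the PMD entries covering [address, address + size) within one
 * PGD entry, allocating PTE tables as needed and handing each one to
 * remap_area_pte().  Returns 0 on success or -ENOMEM if a PTE table
 * cannot be allocated.
 */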
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

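/*
 * Top-level page table walk: map the virtual range starting at
 * 'address' onto 'phys_addr' for 'size' bytes, allocating PMDs as
 * needed under init_mm's page_table_lock, then flush the TLBs so the
 * new kernel mappings become visible.
 */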
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
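/*
 * Illustrative caller sketch (not part of this file): a PCI driver
 * would normally go through the ioremap() wrapper, access the device
 * registers with readl()/writel(), and release the mapping again with
 * iounmap().  The BAR index and the 0x10 register offset below are
 * made-up examples.
 *
 *	void *base = ioremap(pci_resource_start(pdev, 0), 0x100);
 *	if (!base)
 *		return -ENOMEM;
 *	status = readl((unsigned long)base + 0x10);
 *	iounmap(base);
 */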
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
#ifndef CONFIG_DISCONTIGMEM
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
#endif
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}

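/*
 * Undo an __ioremap(): addresses at or below high_memory were never
 * remapped (they came straight from phys_to_virt()), so only mappings
 * above it are handed back to vfree(), after masking off the sub-page
 * offset that __ioremap() folded into the returned pointer.
 */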
void iounmap(void *addr)
{
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}