/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
	prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			       (unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

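/*
 * Worked example of the non-page-aligned case the NOTE above describes
 * (hypothetical numbers, not from any real device): __ioremap(0xfebc1004,
 * 0x10, 0) yields offset = 0x004, phys_addr rounded down to 0xfebc1000,
 * and size rounded up to one page (0x1000). The whole page gets mapped,
 * but the returned pointer is the mapping's base plus 0x004, so the
 * caller never sees the alignment fixup.
 */
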
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		/* Change the attributes of the kernel's own mapping too. */
		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);

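/*
 * Usage sketch (hypothetical driver; the MYDEV_* constants are made up):
 * the kernel-doc above boils down to "map, access only through the mmio
 * helpers, then unmap":
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(MYDEV_MMIO_PHYS, MYDEV_MMIO_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(MYDEV_RESET, regs + MYDEV_CTRL);
 *	status = readl(regs + MYDEV_STATUS);
 *	iounmap(regs);
 */
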
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;
	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}
	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

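/*
 * Note on the ISA special case (illustrative address): a mapping such as
 * __ioremap(0xb8000, 0x1000, 0) falls inside ISA_START_ADDRESS..
 * ISA_END_ADDRESS, so no vm_area is created and phys_to_virt() is
 * returned directly; the early return above then makes the matching
 * iounmap() a no-op for it.
 */
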
int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

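/*
 * Worked example of the index math (two-level i386 paging, 4K pages,
 * illustrative address): for addr == 0xfffff000, the pgd slot is
 * (0xfffff000 >> 22) & 1023 == 1023 and the pte slot within bm_pte is
 * (0xfffff000 >> PAGE_SHIFT) & 1023 == 1023, i.e. the very last entry
 * of the last page table.
 */
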
void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

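/*
 * Usage sketch (hypothetical boot-time caller; table_phys, table_len and
 * table are made up): before the normal ioremap() machinery is up, a
 * firmware table is copied out through a temporary fixmap window and the
 * window is torn down again right away:
 *
 *	void *p = early_ioremap(table_phys, table_len);
 *	if (p) {
 *		memcpy(&table, p, table_len);
 *		early_iounmap(p, table_len);
 *	}
 */
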
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;
	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}
	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}