/* $Id: ioremap.c,v 1.9 2004/02/25 04:59:10 lethal Exp $
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
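
/*
 * Fill in one PTE table: map [address, address + size) within a single
 * PMD entry to the physical range starting at phys_addr, one page at a
 * time.
 */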
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
				   _PAGE_DIRTY | _PAGE_ACCESSED |
				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
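
/*
 * Walk the PMD entries covering [address, address + size) under one PGD
 * slot, allocating PTE tables as needed and filling each via
 * remap_area_pte().
 */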
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
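
/*
 * Top-level page table walk: map the virtual range [address, address + size)
 * to phys_addr with the given protection flags. Takes init_mm.page_table_lock
 * itself and flushes the caches and TLB around the update.
 */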
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
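
/*
 * Minimal usage sketch (not from this file; the base address, length and
 * flags below are made-up values for illustration only):
 *
 *	void *regs = p3_ioremap(0xb8000000, 0x100, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	writeb(0x01, regs);
 *	p3_iounmap(regs);
 */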
void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
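	/*
	 * Worked example with made-up numbers, assuming 4KiB pages:
	 * phys_addr = 0x1fe00004 and size = 0x10 give offset = 0x4, a
	 * page-aligned phys_addr of 0x1fe00000 and size = 0x1000.
	 */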

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}

void p3_iounmap(void *addr)
{
	if (addr > high_memory)
		vfree((void *)(PAGE_MASK & (unsigned long)addr));
}