3 * A copy of mm/memory.c with modifications to handle 64 bit
8 * maps a range of physical memory into the requested pages. the old
9 * mappings are removed. any references to nonexistent pages result
10 * in null mappings (currently treated as "copy-on-access")
14 #include <linux/mman.h>
15 #include <linux/swap.h>
16 #include <linux/smp_lock.h>
17 #include <linux/swapctl.h>
18 #include <linux/iobuf.h>
19 #include <linux/highmem.h>
20 #include <linux/pagemap.h>
21 #include <linux/module.h>
23 #include <asm/pgalloc.h>
24 #include <asm/uaccess.h>
28 * Return indicates whether a page was freed so caller can adjust rss
/*
 * forget_pte - sanity-check a PTE that is about to be discarded.
 *
 * Complains if the old entry was not already clear: remapping is only
 * expected to replace empty slots, so a live mapping here indicates a
 * bug in the caller.
 *
 * NOTE(review): this extract is missing the function's closing lines
 * (original lines 34-36 are not visible), so any cleanup performed
 * after the printk cannot be documented here.
 */
30 static inline void forget_pte(pte_t page)
32 if (!pte_none(page)) {
	/* A mapping was still installed where we expected none. */
33 printk("forget_pte: old mapping existed!\n");
/*
 * remap_pte_range - install mappings for a physical range into one
 * PTE page.
 *
 * Walks PTE slots starting at @pte, mapping @phys_addr.. into the
 * virtual range [@address, @address + @size) with protection @prot.
 *
 * NOTE(review): original lines 40-49 (locals, the computation of
 * 'end', and the loop opening) plus lines 51/55-56/58 are missing
 * from this extract; comments below cover only the visible lines.
 */
38 static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
39 phys_t phys_addr, pgprot_t prot)
	/* Atomically clear the old entry; presumably fed to forget_pte()
	 * on a missing line — confirm against the full source. */
50 oldpage = ptep_get_and_clear(pte);
52 page = virt_to_page(__va(phys_addr));
	/* Only install the mapping when the target is not an ordinary
	 * managed page: either outside mem_map (!VALID_PAGE) or marked
	 * reserved. Normal pages are deliberately left unmapped. */
53 if ((!VALID_PAGE(page)) || PageReserved(page))
54 set_pte(pte, mk_pte_phys(phys_addr, prot));
	/* Advance one page per iteration; the 'address &&' guard also
	 * stops the loop if the virtual address wraps to zero. */
57 phys_addr += PAGE_SIZE;
59 } while (address && (address < end));
/*
 * remap_pmd_range - map a physical range across the PMD entries under
 * one PGD slot.
 *
 * Allocates a PTE page for each covered PMD entry and delegates the
 * per-page work to remap_pte_range().
 *
 * NOTE(review): original lines 64-66, 68-72, 74-75 and 78 are missing
 * from this extract — in particular the pte_alloc() failure check and
 * the computation of 'end' are not visible; confirm against the full
 * source.
 */
62 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
63 phys_t phys_addr, pgprot_t prot)
	/* Reduce 'address' to its offset within the enclosing PGD slot. */
67 address &= ~PGDIR_MASK;
73 pte_t * pte = pte_alloc(mm, pmd, address);
	/* 'address + phys_addr' works because the caller pre-biased
	 * phys_addr by the virtual base — presumably via a
	 * 'phys_addr -= from' on a line missing from this extract;
	 * TODO confirm. */
76 remap_pte_range(pte, address, end - address, address + phys_addr, prot);
	/* Step to the next PMD-aligned address; 'address &&' also stops
	 * the loop on wrap-around to zero. */
77 address = (address + PMD_SIZE) & PMD_MASK;
79 } while (address && (address < end));
/*
 * Platform hook that translates/validates a (possibly >32-bit)
 * physical address before it is mapped.
 */
83 extern phys_t (*fixup_bigphys_addr)(phys_t phys_addr, phys_t size);
84 /* Note: this is only safe if the mm semaphore is held when called. */
/*
 * remap_page_range_high - map physical memory at [phys_addr,
 * phys_addr + size) into the current process at virtual [from,
 * from + size), walking pgd -> pmd -> pte and allocating page tables
 * as needed under mm->page_table_lock.
 *
 * NOTE(review): this extract is fragmentary — original lines 86-88,
 * 92, 94, 97-99, 101, 103-105, 107-108, 110 and everything past 113
 * (including the error checks after pmd_alloc/remap_pmd_range and the
 * final 'return error;') are not visible. Comments cover only the
 * visible lines.
 */
85 int remap_page_range_high(unsigned long from, phys_t phys_addr, unsigned long size, pgprot_t prot)
89 unsigned long beg = from;
90 unsigned long end = from + size;
91 struct mm_struct *mm = current->mm;
	/* Let the platform fix up the physical address (e.g. 36-bit
	 * physical spaces) before any table entries are built. */
93 phys_addr = fixup_bigphys_addr(phys_addr, size);
95 dir = pgd_offset(mm, from);
	/* Flush caches over the whole target range before rewriting the
	 * page tables; the TLB is flushed after (line 113 below). */
96 flush_cache_range(mm, beg, end);
	/* page_table_lock guards table allocation and population; the
	 * header note above additionally requires the mm semaphore. */
100 spin_lock(&mm->page_table_lock);
102 pmd_t *pmd = pmd_alloc(mm, dir, from);
	/* 'phys_addr + from' pairs with the bias applied inside
	 * remap_pmd_range — presumably phys_addr was reduced by 'from'
	 * on a missing line; TODO confirm. */
106 error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
	/* Advance to the next PGD-aligned address; 'from &&' also stops
	 * the loop if the address wraps to zero. */
109 from = (from + PGDIR_SIZE) & PGDIR_MASK;
111 } while (from && (from < end));
112 spin_unlock(&mm->page_table_lock);
113 flush_tlb_range(mm, beg, end);