[bcm963xx.git] / kernel / linux / mm / memory.c
1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  */
38
39 #include <linux/kernel_stat.h>
40 #include <linux/mm.h>
41 #include <linux/hugetlb.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46 #include <linux/rmap.h>
47 #include <linux/module.h>
48 #include <linux/init.h>
49
50 #include <asm/pgalloc.h>
51 #include <asm/uaccess.h>
52 #include <asm/tlb.h>
53 #include <asm/tlbflush.h>
54 #include <asm/pgtable.h>
55
56 #include <linux/swapops.h>
57 #include <linux/elf.h>
58
59 #ifndef CONFIG_DISCONTIGMEM
60 /* use the per-pgdat data instead for discontigmem - mbligh */
61 unsigned long max_mapnr;
62 struct page *mem_map;
63
64 EXPORT_SYMBOL(max_mapnr);
65 EXPORT_SYMBOL(mem_map);
66 #endif
67
68 unsigned long num_physpages;
69 /*
70  * A number of key systems in x86 including ioremap() rely on the assumption
71  * that high_memory defines the upper bound on direct map memory, the end
72  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
73  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
74  * and ZONE_HIGHMEM.
75  */
76 void * high_memory;
77 struct page *highmem_start_page;
78 unsigned long vmalloc_earlyreserve;
79
80 EXPORT_SYMBOL(num_physpages);
81 EXPORT_SYMBOL(highmem_start_page);
82 EXPORT_SYMBOL(high_memory);
83 EXPORT_SYMBOL(vmalloc_earlyreserve);
84
85 /*
86  * We special-case the C-O-W ZERO_PAGE, because it's such
87  * a common occurrence (no need to read the page to know
88  * that it's zero - better for the cache and memory subsystem).
89  */
90 static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
91 {
92         if (from == ZERO_PAGE(address)) {
93                 clear_user_highpage(to, address);
94                 return;
95         }
96         copy_user_highpage(to, from, address);
97 }
98
99 /*
100  * Note: this doesn't free the actual pages themselves. That
101  * has been handled earlier when unmapping all the memory regions.
102  */
103 static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
104 {
105         struct page *page;
106
107         if (pmd_none(*dir))
108                 return;
109         if (unlikely(pmd_bad(*dir))) {
110                 pmd_ERROR(*dir);
111                 pmd_clear(dir);
112                 return;
113         }
114         page = pmd_page(*dir);
115         pmd_clear(dir);
116         dec_page_state(nr_page_table_pages);
117         pte_free_tlb(tlb, page);
118 }
119
120 static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
121 {
122         int j;
123         pmd_t * pmd;
124
125         if (pgd_none(*dir))
126                 return;
127         if (unlikely(pgd_bad(*dir))) {
128                 pgd_ERROR(*dir);
129                 pgd_clear(dir);
130                 return;
131         }
132         pmd = pmd_offset(dir, 0);
133         pgd_clear(dir);
134         for (j = 0; j < PTRS_PER_PMD ; j++)
135                 free_one_pmd(tlb, pmd+j);
136         pmd_free_tlb(tlb, pmd);
137 }
138
139 /*
140  * This function clears all user-level page tables of a process - this
141  * is needed by execve(), so that old pages aren't in the way.
142  *
143  * Must be called with pagetable lock held.
144  */
145 void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
146 {
147         pgd_t * page_dir = tlb->mm->pgd;
148
149         page_dir += first;
150         do {
151                 free_one_pgd(tlb, page_dir);
152                 page_dir++;
153         } while (--nr);
154 }
155
156 pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
157 {
158         if (!pmd_present(*pmd)) {
159                 struct page *new;
160
161                 spin_unlock(&mm->page_table_lock);
162                 new = pte_alloc_one(mm, address);
163                 spin_lock(&mm->page_table_lock);
164                 if (!new)
165                         return NULL;
166
167                 /*
168                  * Because we dropped the lock, we should re-check the
169                  * entry, as somebody else could have populated it..
170                  */
171                 if (pmd_present(*pmd)) {
172                         pte_free(new);
173                         goto out;
174                 }
175                 inc_page_state(nr_page_table_pages);
176                 pmd_populate(mm, pmd, new);
177         }
178 out:
179         return pte_offset_map(pmd, address);
180 }
181
182 pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
183 {
184         if (!pmd_present(*pmd)) {
185                 pte_t *new;
186
187                 spin_unlock(&mm->page_table_lock);
188                 new = pte_alloc_one_kernel(mm, address);
189                 spin_lock(&mm->page_table_lock);
190                 if (!new)
191                         return NULL;
192
193                 /*
194                  * Because we dropped the lock, we should re-check the
195                  * entry, as somebody else could have populated it..
196                  */
197                 if (pmd_present(*pmd)) {
198                         pte_free_kernel(new);
199                         goto out;
200                 }
201                 pmd_populate_kernel(mm, pmd, new);
202         }
203 out:
204         return pte_offset_kernel(pmd, address);
205 }
206 #define PTE_TABLE_MASK  ((PTRS_PER_PTE-1) * sizeof(pte_t))
207 #define PMD_TABLE_MASK  ((PTRS_PER_PMD-1) * sizeof(pmd_t))
208
209 /*
210  * copy one vm_area from one task to the other. Assumes that any page
211  * tables already present in the new task are cleared in the whole range
212  * covered by this vma.
213  *
214  * 08Jan98 Merged into one routine from several inline routines to reduce
215  *         variable count and make things faster. -jj
216  *
217  * dst->page_table_lock is held on entry and exit,
218  * but may be dropped within pmd_alloc() and pte_alloc_map().
219  */
220 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
221                         struct vm_area_struct *vma)
222 {
223         pgd_t * src_pgd, * dst_pgd;
224         unsigned long address = vma->vm_start;
225         unsigned long end = vma->vm_end;
226         unsigned long cow;
227
228         if (is_vm_hugetlb_page(vma))
229                 return copy_hugetlb_page_range(dst, src, vma);
230
231         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
232         src_pgd = pgd_offset(src, address)-1;
233         dst_pgd = pgd_offset(dst, address)-1;
234
235         for (;;) {
236                 pmd_t * src_pmd, * dst_pmd;
237
238                 src_pgd++; dst_pgd++;
239                 
240                 /* copy_pmd_range */
241                 
242                 if (pgd_none(*src_pgd))
243                         goto skip_copy_pmd_range;
244                 if (unlikely(pgd_bad(*src_pgd))) {
245                         pgd_ERROR(*src_pgd);
246                         pgd_clear(src_pgd);
247 skip_copy_pmd_range:    address = (address + PGDIR_SIZE) & PGDIR_MASK;
248                         if (!address || (address >= end))
249                                 goto out;
250                         continue;
251                 }
252
253                 src_pmd = pmd_offset(src_pgd, address);
254                 dst_pmd = pmd_alloc(dst, dst_pgd, address);
255                 if (!dst_pmd)
256                         goto nomem;
257
258                 do {
259                         pte_t * src_pte, * dst_pte;
260                 
261                         /* copy_pte_range */
262                 
263                         if (pmd_none(*src_pmd))
264                                 goto skip_copy_pte_range;
265                         if (unlikely(pmd_bad(*src_pmd))) {
266                                 pmd_ERROR(*src_pmd);
267                                 pmd_clear(src_pmd);
268 skip_copy_pte_range:
269                                 address = (address + PMD_SIZE) & PMD_MASK;
270                                 if (address >= end)
271                                         goto out;
272                                 goto cont_copy_pmd_range;
273                         }
274
275                         dst_pte = pte_alloc_map(dst, dst_pmd, address);
276                         if (!dst_pte)
277                                 goto nomem;
278                         spin_lock(&src->page_table_lock);       
279                         src_pte = pte_offset_map_nested(src_pmd, address);
280                         do {
281                                 pte_t pte = *src_pte;
282                                 struct page *page;
283                                 unsigned long pfn;
284
285                                 /* copy_one_pte */
286
287                                 if (pte_none(pte))
288                                         goto cont_copy_pte_range_noset;
289                                 /* pte contains position in swap, so copy. */
290                                 if (!pte_present(pte)) {
291                                         if (!pte_file(pte))
292                                                 swap_duplicate(pte_to_swp_entry(pte));
293                                         set_pte(dst_pte, pte);
294                                         goto cont_copy_pte_range_noset;
295                                 }
296                                 pfn = pte_pfn(pte);
297                                 /* If the pte points outside of valid memory, the
298                                  * mapping is assumed to be good, meaningful
299                                  * and not mapped via rmap - duplicate the
300                                  * mapping as is.
301                                  */
302                                 page = NULL;
303                                 if (pfn_valid(pfn)) 
304                                         page = pfn_to_page(pfn); 
305
306                                 if (!page || PageReserved(page)) {
307                                         set_pte(dst_pte, pte);
308                                         goto cont_copy_pte_range_noset;
309                                 }
310
311                                 /*
312                                  * If it's a COW mapping, write protect it both
313                                  * in the parent and the child
314                                  */
315                                 if (cow) {
316                                         ptep_set_wrprotect(src_pte);
317                                         pte = *src_pte;
318                                 }
319
320                                 /*
321                                  * If it's a shared mapping, mark it clean in
322                                  * the child
323                                  */
324                                 if (vma->vm_flags & VM_SHARED)
325                                         pte = pte_mkclean(pte);
326                                 pte = pte_mkold(pte);
327                                 get_page(page);
328                                 dst->rss++;
329                                 set_pte(dst_pte, pte);
330                                 page_dup_rmap(page);
331 cont_copy_pte_range_noset:
332                                 address += PAGE_SIZE;
333                                 if (address >= end) {
334                                         pte_unmap_nested(src_pte);
335                                         pte_unmap(dst_pte);
336                                         goto out_unlock;
337                                 }
338                                 src_pte++;
339                                 dst_pte++;
340                         } while ((unsigned long)src_pte & PTE_TABLE_MASK);
341                         pte_unmap_nested(src_pte-1);
342                         pte_unmap(dst_pte-1);
343                         spin_unlock(&src->page_table_lock);
344                         cond_resched_lock(&dst->page_table_lock);
345 cont_copy_pmd_range:
346                         src_pmd++;
347                         dst_pmd++;
348                 } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
349         }
350 out_unlock:
351         spin_unlock(&src->page_table_lock);
352 out:
353         return 0;
354 nomem:
355         return -ENOMEM;
356 }
357
358 static void zap_pte_range(struct mmu_gather *tlb,
359                 pmd_t *pmd, unsigned long address,
360                 unsigned long size, struct zap_details *details)
361 {
362         unsigned long offset;
363         pte_t *ptep;
364
365         if (pmd_none(*pmd))
366                 return;
367         if (unlikely(pmd_bad(*pmd))) {
368                 pmd_ERROR(*pmd);
369                 pmd_clear(pmd);
370                 return;
371         }
372         ptep = pte_offset_map(pmd, address);
373         offset = address & ~PMD_MASK;
374         if (offset + size > PMD_SIZE)
375                 size = PMD_SIZE - offset;
376         size &= PAGE_MASK;
377         if (details && !details->check_mapping && !details->nonlinear_vma)
378                 details = NULL;
379         for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
380                 pte_t pte = *ptep;
381                 if (pte_none(pte))
382                         continue;
383                 if (pte_present(pte)) {
384                         struct page *page = NULL;
385                         unsigned long pfn = pte_pfn(pte);
386                         if (pfn_valid(pfn)) {
387                                 page = pfn_to_page(pfn);
388                                 if (PageReserved(page))
389                                         page = NULL;
390                         }
391                         if (unlikely(details) && page) {
392                                 /*
393                                  * unmap_shared_mapping_pages() wants to
394                                  * invalidate cache without truncating:
395                                  * unmap shared but keep private pages.
396                                  */
397                                 if (details->check_mapping &&
398                                     details->check_mapping != page->mapping)
399                                         continue;
400                                 /*
401                                  * Each page->index must be checked when
402                                  * invalidating or truncating nonlinear.
403                                  */
404                                 if (details->nonlinear_vma &&
405                                     (page->index < details->first_index ||
406                                      page->index > details->last_index))
407                                         continue;
408                         }
409                         pte = ptep_get_and_clear(ptep);
410                         tlb_remove_tlb_entry(tlb, ptep, address+offset);
411                         if (unlikely(!page))
412                                 continue;
413                         if (unlikely(details) && details->nonlinear_vma
414                             && linear_page_index(details->nonlinear_vma,
415                                         address+offset) != page->index)
416                                 set_pte(ptep, pgoff_to_pte(page->index));
417                         if (pte_dirty(pte))
418                                 set_page_dirty(page);
419                         if (pte_young(pte) && !PageAnon(page))
420                                 mark_page_accessed(page);
421                         tlb->freed++;
422                         page_remove_rmap(page);
423                         tlb_remove_page(tlb, page);
424                         continue;
425                 }
426                 /*
427                  * If details->check_mapping, we leave swap entries;
428                  * if details->nonlinear_vma, we leave file entries.
429                  */
430                 if (unlikely(details))
431                         continue;
432                 if (!pte_file(pte))
433                         free_swap_and_cache(pte_to_swp_entry(pte));
434                 pte_clear(ptep);
435         }
436         pte_unmap(ptep-1);
437 }
438
439 static void zap_pmd_range(struct mmu_gather *tlb,
440                 pgd_t * dir, unsigned long address,
441                 unsigned long size, struct zap_details *details)
442 {
443         pmd_t * pmd;
444         unsigned long end;
445
446         if (pgd_none(*dir))
447                 return;
448         if (unlikely(pgd_bad(*dir))) {
449                 pgd_ERROR(*dir);
450                 pgd_clear(dir);
451                 return;
452         }
453         pmd = pmd_offset(dir, address);
454         end = address + size;
455         if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
456                 end = ((address + PGDIR_SIZE) & PGDIR_MASK);
457         do {
458                 zap_pte_range(tlb, pmd, address, end - address, details);
459                 address = (address + PMD_SIZE) & PMD_MASK; 
460                 pmd++;
461         } while (address && (address < end));
462 }
463
464 static void unmap_page_range(struct mmu_gather *tlb,
465                 struct vm_area_struct *vma, unsigned long address,
466                 unsigned long end, struct zap_details *details)
467 {
468         pgd_t * dir;
469
470         BUG_ON(address >= end);
471         dir = pgd_offset(vma->vm_mm, address);
472         tlb_start_vma(tlb, vma);
473         do {
474                 zap_pmd_range(tlb, dir, address, end - address, details);
475                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
476                 dir++;
477         } while (address && (address < end));
478         tlb_end_vma(tlb, vma);
479 }
480
481 /* Dispose of an entire struct mmu_gather per rescheduling point */
482 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
483 #define ZAP_BLOCK_SIZE  (FREE_PTE_NR * PAGE_SIZE)
484 #endif
485
486 /* For UP, 256 pages at a time gives nice low latency */
487 #if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
488 #define ZAP_BLOCK_SIZE  (256 * PAGE_SIZE)
489 #endif
490
491 /* No preempt: go for improved straight-line efficiency */
492 #if !defined(CONFIG_PREEMPT)
493 #define ZAP_BLOCK_SIZE  (1024 * PAGE_SIZE)
494 #endif
495
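/*
 * Worked numbers, assuming 4 KB pages: the preemptible UP batch above is
 * 256 * 4096 = 1 MB and the no-preempt batch is 1024 * 4096 = 4 MB of
 * address space per pass of the unmap_vmas() loop below; on SMP with
 * preempt the batch is FREE_PTE_NR pages, the mmu_gather batch constant.
 */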
496 /**
497  * unmap_vmas - unmap a range of memory covered by a list of vma's
498  * @tlbp: address of the caller's struct mmu_gather
499  * @mm: the controlling mm_struct
500  * @vma: the starting vma
501  * @start_addr: virtual address at which to start unmapping
502  * @end_addr: virtual address at which to end unmapping
503  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
504  * @details: details of nonlinear truncation or shared cache invalidation
505  *
506  * Returns the number of vma's which were covered by the unmapping.
507  *
508  * Unmap all pages in the vma list.  Called under page_table_lock.
509  *
510  * We aim to not hold page_table_lock for too long (for scheduling latency
511  * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
512  * return the ending mmu_gather to the caller.
513  *
514  * Only addresses between `start' and `end' will be unmapped.
515  *
516  * The VMA list must be sorted in ascending virtual address order.
517  *
518  * unmap_vmas() assumes that the caller will flush the whole unmapped address
519  * range after unmap_vmas() returns.  So the only responsibility here is to
520  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
521  * drops the lock and schedules.
522  */
523 int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
524                 struct vm_area_struct *vma, unsigned long start_addr,
525                 unsigned long end_addr, unsigned long *nr_accounted,
526                 struct zap_details *details)
527 {
528         unsigned long zap_bytes = ZAP_BLOCK_SIZE;
529         unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
530         int tlb_start_valid = 0;
531         int ret = 0;
532         int atomic = details && details->atomic;
533
534         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
535                 unsigned long start;
536                 unsigned long end;
537
538                 start = max(vma->vm_start, start_addr);
539                 if (start >= vma->vm_end)
540                         continue;
541                 end = min(vma->vm_end, end_addr);
542                 if (end <= vma->vm_start)
543                         continue;
544
545                 if (vma->vm_flags & VM_ACCOUNT)
546                         *nr_accounted += (end - start) >> PAGE_SHIFT;
547
548                 ret++;
549                 while (start != end) {
550                         unsigned long block;
551
552                         if (!tlb_start_valid) {
553                                 tlb_start = start;
554                                 tlb_start_valid = 1;
555                         }
556
557                         if (is_vm_hugetlb_page(vma)) {
558                                 block = end - start;
559                                 unmap_hugepage_range(vma, start, end);
560                         } else {
561                                 block = min(zap_bytes, end - start);
562                                 unmap_page_range(*tlbp, vma, start,
563                                                 start + block, details);
564                         }
565
566                         start += block;
567                         zap_bytes -= block;
568                         if ((long)zap_bytes > 0)
569                                 continue;
570                         if (!atomic && need_resched()) {
571                                 int fullmm = tlb_is_full_mm(*tlbp);
572                                 tlb_finish_mmu(*tlbp, tlb_start, start);
573                                 cond_resched_lock(&mm->page_table_lock);
574                                 *tlbp = tlb_gather_mmu(mm, fullmm);
575                                 tlb_start_valid = 0;
576                         }
577                         zap_bytes = ZAP_BLOCK_SIZE;
578                 }
579         }
580         return ret;
581 }
582
583 /**
584  * zap_page_range - remove user pages in a given range
585  * @vma: vm_area_struct holding the applicable pages
586  * @address: starting address of pages to zap
587  * @size: number of bytes to zap
588  * @details: details of nonlinear truncation or shared cache invalidation
589  */
590 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
591                 unsigned long size, struct zap_details *details)
592 {
593         struct mm_struct *mm = vma->vm_mm;
594         struct mmu_gather *tlb;
595         unsigned long end = address + size;
596         unsigned long nr_accounted = 0;
597
598         if (is_vm_hugetlb_page(vma)) {
599                 zap_hugepage_range(vma, address, size);
600                 return;
601         }
602
603         lru_add_drain();
604         spin_lock(&mm->page_table_lock);
605         tlb = tlb_gather_mmu(mm, 0);
606         unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
607         tlb_finish_mmu(tlb, address, end);
608         spin_unlock(&mm->page_table_lock);
609 }
610
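/*
 * A minimal usage sketch (hypothetical, not from this file): callers that
 * want to tear down every user mapping of a vma typically pass the whole
 * vma range and no zap_details, as the nonlinear path of
 * unmap_mapping_range() does further down.  The mm semaphore is assumed to
 * be held by the caller; the page_table_lock is taken internally above.
 */
static inline void zap_whole_vma_sketch(struct vm_area_struct *vma)
{
        zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
}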
611 /*
612  * Do a quick page-table lookup for a single page.
613  * mm->page_table_lock must be held.
614  */
615 struct page *
616 follow_page(struct mm_struct *mm, unsigned long address, int write) 
617 {
618         pgd_t *pgd;
619         pmd_t *pmd;
620         pte_t *ptep, pte;
621         unsigned long pfn;
622         struct page *page;
623
624         page = follow_huge_addr(mm, address, write);
625         if (! IS_ERR(page))
626                 return page;
627
628         pgd = pgd_offset(mm, address);
629         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
630                 goto out;
631
632         pmd = pmd_offset(pgd, address);
633         if (pmd_none(*pmd))
634                 goto out;
635         if (pmd_huge(*pmd))
636                 return follow_huge_pmd(mm, address, pmd, write);
637         if (unlikely(pmd_bad(*pmd)))
638                 goto out;
639
640         ptep = pte_offset_map(pmd, address);
641         if (!ptep)
642                 goto out;
643
644         pte = *ptep;
645         pte_unmap(ptep);
646         if (pte_present(pte)) {
647                 if (write && !pte_write(pte))
648                         goto out;
649                 pfn = pte_pfn(pte);
650                 if (pfn_valid(pfn)) {
651                         page = pfn_to_page(pfn);
652                         if (write && !pte_dirty(pte) && !PageDirty(page))
653                                 set_page_dirty(page);
654                         mark_page_accessed(page);
655                         return page;
656                 }
657         }
658
659 out:
660         return NULL;
661 }
662
663 /* 
664  * Given a physical address, is there a useful struct page pointing to
665  * it?  This may become more complex in the future if we start dealing
666  * with IO-aperture pages for direct-IO.
667  */
668
669 static inline struct page *get_page_map(struct page *page)
670 {
671         if (!pfn_valid(page_to_pfn(page)))
672                 return NULL;
673         return page;
674 }
675
676
677 static inline int
678 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
679                          unsigned long address)
680 {
681         pgd_t *pgd;
682         pmd_t *pmd;
683
684         /* Check if the vma is for an anonymous mapping. */
685         if (vma->vm_ops && vma->vm_ops->nopage)
686                 return 0;
687
688         /* Check if page directory entry exists. */
689         pgd = pgd_offset(mm, address);
690         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
691                 return 1;
692
693         /* Check if page middle directory entry exists. */
694         pmd = pmd_offset(pgd, address);
695         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
696                 return 1;
697
698         /* There is a pte slot for 'address' in 'mm'. */
699         return 0;
700 }
701
702
703 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
704                 unsigned long start, int len, int write, int force,
705                 struct page **pages, struct vm_area_struct **vmas)
706 {
707         int i;
708         unsigned int flags;
709
710         /* 
711          * Require read or write permissions.
712          * If 'force' is set, we only require the "MAY" flags.
713          */
714         flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
715         flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
716         i = 0;
717
718         do {
719                 struct vm_area_struct * vma;
720
721                 vma = find_extend_vma(mm, start);
722                 if (!vma && in_gate_area(tsk, start)) {
723                         unsigned long pg = start & PAGE_MASK;
724                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
725                         pgd_t *pgd;
726                         pmd_t *pmd;
727                         pte_t *pte;
728                         if (write) /* user gate pages are read-only */
729                                 return i ? : -EFAULT;
730                         pgd = pgd_offset_gate(mm, pg);
731                         if (!pgd)
732                                 return i ? : -EFAULT;
733                         pmd = pmd_offset(pgd, pg);
734                         if (!pmd)
735                                 return i ? : -EFAULT;
736                         pte = pte_offset_map(pmd, pg);
737                         if (!pte)
738                                 return i ? : -EFAULT;
739                         if (!pte_present(*pte)) {
740                                 pte_unmap(pte);
741                                 return i ? : -EFAULT;
742                         }
743                         if (pages) {
744                                 pages[i] = pte_page(*pte);
745                                 get_page(pages[i]);
746                         }
747                         pte_unmap(pte);
748                         if (vmas)
749                                 vmas[i] = gate_vma;
750                         i++;
751                         start += PAGE_SIZE;
752                         len--;
753                         continue;
754                 }
755
756                 if (!vma || (pages && (vma->vm_flags & VM_IO))
757                                 || !(flags & vma->vm_flags))
758                         return i ? : -EFAULT;
759
760                 if (is_vm_hugetlb_page(vma)) {
761                         i = follow_hugetlb_page(mm, vma, pages, vmas,
762                                                 &start, &len, i);
763                         continue;
764                 }
765                 spin_lock(&mm->page_table_lock);
766                 do {
767                         struct page *map;
768                         int lookup_write = write;
769                         while (!(map = follow_page(mm, start, lookup_write))) {
770                                 /*
771                                  * Shortcut for anonymous pages. We don't want
772                                  * to force the creation of page tables for
773                                  * insanely big anonymously mapped areas that
774                                  * nobody touched so far. This is important
775                                  * for doing a core dump for these mappings.
776                                  */
777                                 if (!lookup_write &&
778                                     untouched_anonymous_page(mm,vma,start)) {
779                                         map = ZERO_PAGE(start);
780                                         break;
781                                 }
782                                 spin_unlock(&mm->page_table_lock);
783                                 switch (handle_mm_fault(mm,vma,start,write)) {
784                                 case VM_FAULT_MINOR:
785                                         tsk->min_flt++;
786                                         break;
787                                 case VM_FAULT_MAJOR:
788                                         tsk->maj_flt++;
789                                         break;
790                                 case VM_FAULT_SIGBUS:
791                                         return i ? i : -EFAULT;
792                                 case VM_FAULT_OOM:
793                                         return i ? i : -ENOMEM;
794                                 default:
795                                         BUG();
796                                 }
797                                 /*
798                                  * Now that we have performed a write fault
799                                  * and surely no longer have a shared page we
800                                  * shouldn't write, we shouldn't ignore an
801                                  * unwritable page in the page table if
802                                  * we are forcing write access.
803                                  */
804                                 lookup_write = write && !force;
805                                 spin_lock(&mm->page_table_lock);
806                         }
807                         if (pages) {
808                                 pages[i] = get_page_map(map);
809                                 if (!pages[i]) {
810                                         spin_unlock(&mm->page_table_lock);
811                                         while (i--)
812                                                 page_cache_release(pages[i]);
813                                         i = -EFAULT;
814                                         goto out;
815                                 }
816                                 flush_dcache_page(pages[i]);
817                                 if (!PageReserved(pages[i]))
818                                         page_cache_get(pages[i]);
819                         }
820                         if (vmas)
821                                 vmas[i] = vma;
822                         i++;
823                         start += PAGE_SIZE;
824                         len--;
825                 } while(len && start < vma->vm_end);
826                 spin_unlock(&mm->page_table_lock);
827         } while(len);
828 out:
829         return i;
830 }
831
832 EXPORT_SYMBOL(get_user_pages);
833
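/*
 * A minimal usage sketch (hypothetical driver code, not from this file):
 * pin the pages backing a page-aligned user buffer for writing, then drop
 * each reference with page_cache_release() once the I/O has completed.
 * get_user_pages() is assumed to be called with the mm semaphore held,
 * since it walks the vma list and may call handle_mm_fault().
 */
static int pin_user_buffer_sketch(unsigned long uaddr, int nr_pages,
                                  struct page **pages)
{
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr, nr_pages,
                             1 /* write */, 0 /* force */, pages, NULL);
        up_read(&current->mm->mmap_sem);

        return ret;     /* number of pages pinned, or a negative error */
}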
834 static void zeromap_pte_range(pte_t * pte, unsigned long address,
835                                      unsigned long size, pgprot_t prot)
836 {
837         unsigned long end;
838
839         address &= ~PMD_MASK;
840         end = address + size;
841         if (end > PMD_SIZE)
842                 end = PMD_SIZE;
843         do {
844                 pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
845                 BUG_ON(!pte_none(*pte));
846                 set_pte(pte, zero_pte);
847                 address += PAGE_SIZE;
848                 pte++;
849         } while (address && (address < end));
850 }
851
852 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
853                                     unsigned long size, pgprot_t prot)
854 {
855         unsigned long base, end;
856
857         base = address & PGDIR_MASK;
858         address &= ~PGDIR_MASK;
859         end = address + size;
860         if (end > PGDIR_SIZE)
861                 end = PGDIR_SIZE;
862         do {
863                 pte_t * pte = pte_alloc_map(mm, pmd, base + address);
864                 if (!pte)
865                         return -ENOMEM;
866                 zeromap_pte_range(pte, base + address, end - address, prot);
867                 pte_unmap(pte);
868                 address = (address + PMD_SIZE) & PMD_MASK;
869                 pmd++;
870         } while (address && (address < end));
871         return 0;
872 }
873
874 int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, pgprot_t prot)
875 {
876         int error = 0;
877         pgd_t * dir;
878         unsigned long beg = address;
879         unsigned long end = address + size;
880         struct mm_struct *mm = vma->vm_mm;
881
882         dir = pgd_offset(mm, address);
883         flush_cache_range(vma, beg, end);
884         if (address >= end)
885                 BUG();
886
887         spin_lock(&mm->page_table_lock);
888         do {
889                 pmd_t *pmd = pmd_alloc(mm, dir, address);
890                 error = -ENOMEM;
891                 if (!pmd)
892                         break;
893                 error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
894                 if (error)
895                         break;
896                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
897                 dir++;
898         } while (address && (address < end));
899         /*
900          * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
901          */
902         flush_tlb_range(vma, beg, end);
903         spin_unlock(&mm->page_table_lock);
904         return error;
905 }
906
907 /*
908  * Maps a range of physical memory into the requested pages. The old
909  * mappings are removed. Any references to nonexistent pages result
910  * in null mappings (currently treated as "copy-on-access").
911  */
912 static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
913         unsigned long phys_addr, pgprot_t prot)
914 {
915         unsigned long end;
916         unsigned long pfn;
917
918         address &= ~PMD_MASK;
919         end = address + size;
920         if (end > PMD_SIZE)
921                 end = PMD_SIZE;
922         pfn = phys_addr >> PAGE_SHIFT;
923         do {
924                 BUG_ON(!pte_none(*pte));
925                 if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
926                         set_pte(pte, pfn_pte(pfn, prot));
927                 address += PAGE_SIZE;
928                 pfn++;
929                 pte++;
930         } while (address && (address < end));
931 }
932
933 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
934         unsigned long phys_addr, pgprot_t prot)
935 {
936         unsigned long base, end;
937
938         base = address & PGDIR_MASK;
939         address &= ~PGDIR_MASK;
940         end = address + size;
941         if (end > PGDIR_SIZE)
942                 end = PGDIR_SIZE;
943         phys_addr -= address;
944         do {
945                 pte_t * pte = pte_alloc_map(mm, pmd, base + address);
946                 if (!pte)
947                         return -ENOMEM;
948                 remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
949                 pte_unmap(pte);
950                 address = (address + PMD_SIZE) & PMD_MASK;
951                 pmd++;
952         } while (address && (address < end));
953         return 0;
954 }
955
956 /*  Note: this is only safe if the mm semaphore is held when called. */
957 int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
958 {
959         int error = 0;
960         pgd_t * dir;
961         unsigned long beg = from;
962         unsigned long end = from + size;
963         struct mm_struct *mm = vma->vm_mm;
964
965         phys_addr -= from;
966         dir = pgd_offset(mm, from);
967         flush_cache_range(vma, beg, end);
968         if (from >= end)
969                 BUG();
970
971         spin_lock(&mm->page_table_lock);
972         do {
973                 pmd_t *pmd = pmd_alloc(mm, dir, from);
974                 error = -ENOMEM;
975                 if (!pmd)
976                         break;
977                 error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
978                 if (error)
979                         break;
980                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
981                 dir++;
982         } while (from && (from < end));
983         /*
984          * Why flush? remap_pte_range has a BUG_ON for !pte_none()
985          */
986         flush_tlb_range(vma, beg, end);
987         spin_unlock(&mm->page_table_lock);
988         return error;
989 }
990
991 EXPORT_SYMBOL(remap_page_range);
992
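/*
 * A minimal usage sketch (hypothetical driver code, not from this file):
 * a character driver's ->mmap() method commonly maps a physical window
 * into the calling process with remap_page_range().  do_mmap() already
 * holds the mm semaphore when ->mmap() is invoked, satisfying the note
 * above.  MYDEV_PHYS_BASE is a made-up constant for the device region.
 */
static int mydev_mmap_sketch(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long phys = MYDEV_PHYS_BASE + (vma->vm_pgoff << PAGE_SHIFT);

        if (remap_page_range(vma, vma->vm_start, phys, size,
                             vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}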
993 /*
994  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
995  * servicing faults for write access.  In the normal case, we always want
996  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
997  * that do not have writing enabled, when used by access_process_vm.
998  */
999 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1000 {
1001         if (likely(vma->vm_flags & VM_WRITE))
1002                 pte = pte_mkwrite(pte);
1003         return pte;
1004 }
1005
1006 /*
1007  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
1008  */
1009 static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, 
1010                 pte_t *page_table)
1011 {
1012         pte_t entry;
1013
1014         flush_cache_page(vma, address);
1015         entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
1016                               vma);
1017         ptep_establish(vma, address, page_table, entry);
1018         update_mmu_cache(vma, address, entry);
1019 }
1020
1021 /*
1022  * This routine handles present pages, when users try to write
1023  * to a shared page. It is done by copying the page to a new address
1024  * and decrementing the shared-page counter for the old page.
1025  *
1026  * Goto-purists beware: the only reason for goto's here is that it results
1027  * in better assembly code.. The "default" path will see no jumps at all.
1028  *
1029  * Note that this routine assumes that the protection checks have been
1030  * done by the caller (the low-level page fault routine in most cases).
1031  * Thus we can safely just mark it writable once we've done any necessary
1032  * COW.
1033  *
1034  * We also mark the page dirty at this point even though the page will
1035  * change only once the write actually happens. This avoids a few races,
1036  * and potentially makes it more efficient.
1037  *
1038  * We hold the mm semaphore and the page_table_lock on entry and exit
1039  * with the page_table_lock released.
1040  */
1041 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
1042         unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
1043 {
1044         struct page *old_page, *new_page;
1045         unsigned long pfn = pte_pfn(pte);
1046         pte_t entry;
1047
1048         if (unlikely(!pfn_valid(pfn))) {
1049                 /*
1050                  * This should really halt the system so it can be debugged or
1051                  * at least the kernel stops what it's doing before it corrupts
1052                  * data, but for the moment just pretend this is OOM.
1053                  */
1054                 pte_unmap(page_table);
1055                 printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
1056                                 address);
1057                 spin_unlock(&mm->page_table_lock);
1058                 return VM_FAULT_OOM;
1059         }
1060         old_page = pfn_to_page(pfn);
1061
1062         if (!TestSetPageLocked(old_page)) {
1063                 int reuse = can_share_swap_page(old_page);
1064                 unlock_page(old_page);
1065                 if (reuse) {
1066                         flush_cache_page(vma, address);
1067                         entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
1068                                               vma);
1069                         ptep_set_access_flags(vma, address, page_table, entry, 1);
1070                         update_mmu_cache(vma, address, entry);
1071                         pte_unmap(page_table);
1072                         spin_unlock(&mm->page_table_lock);
1073                         return VM_FAULT_MINOR;
1074                 }
1075         }
1076         pte_unmap(page_table);
1077
1078         /*
1079          * Ok, we need to copy. Oh, well..
1080          */
1081         if (!PageReserved(old_page))
1082                 page_cache_get(old_page);
1083         spin_unlock(&mm->page_table_lock);
1084
1085         if (unlikely(anon_vma_prepare(vma)))
1086                 goto no_new_page;
1087         new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1088         if (!new_page)
1089                 goto no_new_page;
1090         copy_cow_page(old_page,new_page,address);
1091
1092         /*
1093          * Re-check the pte - we dropped the lock
1094          */
1095         spin_lock(&mm->page_table_lock);
1096         page_table = pte_offset_map(pmd, address);
1097         if (likely(pte_same(*page_table, pte))) {
1098                 if (PageReserved(old_page))
1099                         ++mm->rss;
1100                 else
1101                         page_remove_rmap(old_page);
1102                 break_cow(vma, new_page, address, page_table);
1103                 lru_cache_add_active(new_page);
1104                 page_add_anon_rmap(new_page, vma, address);
1105
1106                 /* Free the old page.. */
1107                 new_page = old_page;
1108         }
1109         pte_unmap(page_table);
1110         page_cache_release(new_page);
1111         page_cache_release(old_page);
1112         spin_unlock(&mm->page_table_lock);
1113         return VM_FAULT_MINOR;
1114
1115 no_new_page:
1116         page_cache_release(old_page);
1117         return VM_FAULT_OOM;
1118 }
1119
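/*
 * Illustrative user-space sketch (not kernel code, guarded out): after
 * fork() the parent and child share anonymous pages write-protected (see
 * copy_page_range() above).  The child's store below takes a write fault
 * that lands here in do_wp_page(), which copies the page, so the parent
 * still reads 1.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int *val = malloc(sizeof(*val));

        *val = 1;
        if (fork() == 0) {
                *val = 2;                       /* write fault -> COW copy */
                _exit(0);
        }
        wait(NULL);
        printf("parent sees %d\n", *val);       /* prints 1 */
        return 0;
}
#endif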
1120 /*
1121  * Helper function for unmap_mapping_range().
1122  */
1123 static inline void unmap_mapping_range_list(struct prio_tree_root *root,
1124                                             struct zap_details *details)
1125 {
1126         struct vm_area_struct *vma = NULL;
1127         struct prio_tree_iter iter;
1128         pgoff_t vba, vea, zba, zea;
1129
1130         while ((vma = vma_prio_tree_next(vma, root, &iter,
1131                         details->first_index, details->last_index)) != NULL) {
1132                 vba = vma->vm_pgoff;
1133                 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
1134                 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
1135                 zba = details->first_index;
1136                 if (zba < vba)
1137                         zba = vba;
1138                 zea = details->last_index;
1139                 if (zea > vea)
1140                         zea = vea;
1141                 zap_page_range(vma,
1142                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
1143                         (zea - zba + 1) << PAGE_SHIFT, details);
1144         }
1145 }
1146
1147 /**
1148  * unmap_mapping_range - unmap the portion of all mmaps
1149  * in the specified address_space corresponding to the specified
1150  * page range in the underlying file.
1151  * @address_space: the address space containing mmaps to be unmapped.
1152  * @holebegin: byte in first page to unmap, relative to the start of
1153  * the underlying file.  This will be rounded down to a PAGE_SIZE
1154  * boundary.  Note that this is different from vmtruncate(), which
1155  * must keep the partial page.  In contrast, we must get rid of
1156  * partial pages.
1157  * @holelen: size of prospective hole in bytes.  This will be rounded
1158  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1159  * end of the file.
1160  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
1161  * but 0 when invalidating pagecache, don't throw away private data.
1162  */
1163 void unmap_mapping_range(struct address_space *mapping,
1164                 loff_t const holebegin, loff_t const holelen, int even_cows)
1165 {
1166         struct zap_details details;
1167         pgoff_t hba = holebegin >> PAGE_SHIFT;
1168         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1169
1170         /* Check for overflow. */
1171         if (sizeof(holelen) > sizeof(hlen)) {
1172                 long long holeend =
1173                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1174                 if (holeend & ~(long long)ULONG_MAX)
1175                         hlen = ULONG_MAX - hba + 1;
1176         }
1177
1178         details.check_mapping = even_cows? NULL: mapping;
1179         details.nonlinear_vma = NULL;
1180         details.first_index = hba;
1181         details.last_index = hba + hlen - 1;
1182         details.atomic = 1;     /* A spinlock is held */
1183         if (details.last_index < details.first_index)
1184                 details.last_index = ULONG_MAX;
1185
1186         spin_lock(&mapping->i_mmap_lock);
1187         /* Protect against page fault */
1188         atomic_inc(&mapping->truncate_count);
1189
1190         if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
1191                 unmap_mapping_range_list(&mapping->i_mmap, &details);
1192
1193         /*
1194          * In nonlinear VMAs there is no correspondence between virtual address
1195          * offset and file offset.  So we must perform an exhaustive search
1196          * across *all* the pages in each nonlinear VMA, not just the pages
1197          * whose virtual address lies outside the file truncation point.
1198          */
1199         if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
1200                 struct vm_area_struct *vma;
1201                 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1202                                                 shared.vm_set.list) {
1203                         details.nonlinear_vma = vma;
1204                         zap_page_range(vma, vma->vm_start,
1205                                 vma->vm_end - vma->vm_start, &details);
1206                 }
1207         }
1208         spin_unlock(&mapping->i_mmap_lock);
1209 }
1210 EXPORT_SYMBOL(unmap_mapping_range);
1211
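/*
 * A minimal usage sketch (hypothetical filesystem code, not from this
 * file): a network filesystem that learns a file changed on the server can
 * knock out every mapping of it without discarding private COWed pages,
 * since holelen == 0 means "to the end of the file" and even_cows == 0
 * keeps private data.  Compare vmtruncate() below, which passes
 * even_cows == 1 because the truncated pages are truly gone.
 */
static inline void invalidate_file_mappings_sketch(struct address_space *mapping)
{
        unmap_mapping_range(mapping, 0, 0, 0);
}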
1212 /*
1213  * Handle all mappings that got truncated by a "truncate()"
1214  * system call.
1215  *
1216  * NOTE! We have to be ready to update the memory sharing
1217  * between the file and the memory map for a potential last
1218  * incomplete page.  Ugly, but necessary.
1219  */
1220 int vmtruncate(struct inode * inode, loff_t offset)
1221 {
1222         struct address_space *mapping = inode->i_mapping;
1223         unsigned long limit;
1224
1225         if (inode->i_size < offset)
1226                 goto do_expand;
1227         /*
1228          * truncation of in-use swapfiles is disallowed - it would cause
1229          * subsequent swapout to scribble on the now-freed blocks.
1230          */
1231         if (IS_SWAPFILE(inode))
1232                 goto out_busy;
1233         i_size_write(inode, offset);
1234         unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1235         truncate_inode_pages(mapping, offset);
1236         goto out_truncate;
1237
1238 do_expand:
1239         limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
1240         if (limit != RLIM_INFINITY && offset > limit)
1241                 goto out_sig;
1242         if (offset > inode->i_sb->s_maxbytes)
1243                 goto out_big;
1244         i_size_write(inode, offset);
1245
1246 out_truncate:
1247         if (inode->i_op && inode->i_op->truncate)
1248                 inode->i_op->truncate(inode);
1249         return 0;
1250 out_sig:
1251         send_sig(SIGXFSZ, current, 0);
1252 out_big:
1253         return -EFBIG;
1254 out_busy:
1255         return -ETXTBSY;
1256 }
1257
1258 EXPORT_SYMBOL(vmtruncate);
1259
1260 /* 
1261  * Primitive swap readahead code. We simply read an aligned block of
1262  * (1 << page_cluster) entries in the swap area. This method is chosen
1263  * because it doesn't cost us any seek time.  We also make sure to queue
1264  * the 'original' request together with the readahead ones...  
1265  *
1266  * This has been extended to use the NUMA policies from the mm triggering
1267  * the readahead.
1268  *
1269  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
1270  */
1271 void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
1272 {
1273 #ifdef CONFIG_NUMA
1274         struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
1275 #endif
1276         int i, num;
1277         struct page *new_page;
1278         unsigned long offset;
1279
1280         /*
1281          * Get the number of handles we should do readahead io to.
1282          */
1283         num = valid_swaphandles(entry, &offset);
1284         for (i = 0; i < num; offset++, i++) {
1285                 /* Ok, do the async read-ahead now */
1286                 new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1287                                                            offset), vma, addr);
1288                 if (!new_page)
1289                         break;
1290                 page_cache_release(new_page);
1291 #ifdef CONFIG_NUMA
1292                 /*
1293                  * Find the next applicable VMA for the NUMA policy.
1294                  */
1295                 addr += PAGE_SIZE;
1296                 if (addr == 0)
1297                         vma = NULL;
1298                 if (vma) {
1299                         if (addr >= vma->vm_end) {
1300                                 vma = next_vma;
1301                                 next_vma = vma ? vma->vm_next : NULL;
1302                         }
1303                         if (vma && addr < vma->vm_start)
1304                                 vma = NULL;
1305                 } else {
1306                         if (next_vma && addr >= next_vma->vm_start) {
1307                                 vma = next_vma;
1308                                 next_vma = vma->vm_next;
1309                         }
1310                 }
1311 #endif
1312         }
1313         lru_add_drain();        /* Push any new pages onto the LRU now */
1314 }
1315
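/*
 * Worked numbers, assuming 4 KB pages and the common page_cluster value
 * of 3: valid_swaphandles() hands back at most 1 << 3 = 8 swap slots, so
 * each readahead pass issues up to 32 KB of contiguous swap reads around
 * the faulting entry.
 */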
1316 /*
1317  * We hold the mm semaphore and the page_table_lock on entry and
1318  * should release the pagetable lock on exit..
1319  */
1320 static int do_swap_page(struct mm_struct * mm,
1321         struct vm_area_struct * vma, unsigned long address,
1322         pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
1323 {
1324         struct page *page;
1325         swp_entry_t entry = pte_to_swp_entry(orig_pte);
1326         pte_t pte;
1327         int ret = VM_FAULT_MINOR;
1328
1329         pte_unmap(page_table);
1330         spin_unlock(&mm->page_table_lock);
1331         page = lookup_swap_cache(entry);
1332         if (!page) {
1333                 swapin_readahead(entry, address, vma);
1334                 page = read_swap_cache_async(entry, vma, address);
1335                 if (!page) {
1336                         /*
1337                          * Back out if somebody else faulted in this pte while
1338                          * we released the page table lock.
1339                          */
1340                         spin_lock(&mm->page_table_lock);
1341                         page_table = pte_offset_map(pmd, address);
1342                         if (likely(pte_same(*page_table, orig_pte)))
1343                                 ret = VM_FAULT_OOM;
1344                         else
1345                                 ret = VM_FAULT_MINOR;
1346                         pte_unmap(page_table);
1347                         spin_unlock(&mm->page_table_lock);
1348                         goto out;
1349                 }
1350
1351                 /* Had to read the page from swap area: Major fault */
1352                 ret = VM_FAULT_MAJOR;
1353                 inc_page_state(pgmajfault);
1354         }
1355
1356         mark_page_accessed(page);
1357         lock_page(page);
1358
1359         /*
1360          * Back out if somebody else faulted in this pte while we
1361          * released the page table lock.
1362          */
1363         spin_lock(&mm->page_table_lock);
1364         page_table = pte_offset_map(pmd, address);
1365         if (unlikely(!pte_same(*page_table, orig_pte))) {
1366                 pte_unmap(page_table);
1367                 spin_unlock(&mm->page_table_lock);
1368                 unlock_page(page);
1369                 page_cache_release(page);
1370                 ret = VM_FAULT_MINOR;
1371                 goto out;
1372         }
1373
1374         /* The page isn't present yet, go ahead with the fault. */
1375                 
1376         swap_free(entry);
1377         if (vm_swap_full())
1378                 remove_exclusive_swap_page(page);
1379
1380         mm->rss++;
1381         pte = mk_pte(page, vma->vm_page_prot);
1382         if (write_access && can_share_swap_page(page)) {
1383                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
1384                 write_access = 0;
1385         }
1386         unlock_page(page);
1387
1388         flush_icache_page(vma, page);
1389         set_pte(page_table, pte);
1390         page_add_anon_rmap(page, vma, address);
1391
1392         if (write_access) {
1393                 if (do_wp_page(mm, vma, address,
1394                                 page_table, pmd, pte) == VM_FAULT_OOM)
1395                         ret = VM_FAULT_OOM;
1396                 goto out;
1397         }
1398
1399         /* No need to invalidate - it was non-present before */
1400         update_mmu_cache(vma, address, pte);
1401         pte_unmap(page_table);
1402         spin_unlock(&mm->page_table_lock);
1403 out:
1404         return ret;
1405 }
1406
1407 /*
1408  * We are called with the MM semaphore and page_table_lock
1409  * spinlock held to protect against concurrent faults in
1410  * multithreaded programs. 
1411  */
1412 static int
1413 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1414                 pte_t *page_table, pmd_t *pmd, int write_access,
1415                 unsigned long addr)
1416 {
1417         pte_t entry;
1418         struct page * page = ZERO_PAGE(addr);
1419
1420         /* Read-only mapping of ZERO_PAGE. */
1421         entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
1422
1423         /* ..except if it's a write access */
1424         if (write_access) {
1425                 /* Allocate our own private page. */
1426                 pte_unmap(page_table);
1427                 spin_unlock(&mm->page_table_lock);
1428
1429                 if (unlikely(anon_vma_prepare(vma)))
1430                         goto no_mem;
1431                 page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
1432                 if (!page)
1433                         goto no_mem;
1434                 clear_user_highpage(page, addr);
1435
1436                 spin_lock(&mm->page_table_lock);
1437                 page_table = pte_offset_map(pmd, addr);
1438
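                /*
                 * Another thread may have mapped this pte while we
                 * dropped the lock: if so, back out and drop our page.
                 */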
1439                 if (!pte_none(*page_table)) {
1440                         pte_unmap(page_table);
1441                         page_cache_release(page);
1442                         spin_unlock(&mm->page_table_lock);
1443                         goto out;
1444                 }
1445                 mm->rss++;
1446                 entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
1447                                                          vma->vm_page_prot)),
1448                                       vma);
1449                 lru_cache_add_active(page);
1450                 mark_page_accessed(page);
1451                 page_add_anon_rmap(page, vma, addr);
1452         }
1453
1454         set_pte(page_table, entry);
1455         pte_unmap(page_table);
1456
1457         /* No need to invalidate - it was non-present before */
1458         update_mmu_cache(vma, addr, entry);
1459         spin_unlock(&mm->page_table_lock);
1460 out:
1461         return VM_FAULT_MINOR;
1462 no_mem:
1463         return VM_FAULT_OOM;
1464 }
1465
1466 /*
1467  * do_no_page() tries to create a new page mapping. It aggressively
1468  * tries to share with existing pages, but makes a separate copy if
1469  * the "write_access" parameter is true in order to avoid the next
1470  * page fault.
1471  *
1472  * As this is called only for pages that do not currently exist, we
1473  * do not need to flush old virtual caches or the TLB.
1474  *
1475  * This is called with the MM semaphore held and the page table
1476  * spinlock held. Exit with the spinlock released.
1477  */
1478 static int
1479 do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1480         unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
1481 {
1482         struct page * new_page;
1483         struct address_space *mapping = NULL;
1484         pte_t entry;
1485         int sequence = 0;
1486         int ret = VM_FAULT_MINOR;
1487         int anon = 0;
1488
1489         if (!vma->vm_ops || !vma->vm_ops->nopage)
1490                 return do_anonymous_page(mm, vma, page_table,
1491                                         pmd, write_access, address);
1492         pte_unmap(page_table);
1493         spin_unlock(&mm->page_table_lock);
1494
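        /*
         * Snapshot the mapping's truncate_count before calling ->nopage(),
         * so we can detect a truncate that races with the lockless fault.
         */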
1495         if (vma->vm_file) {
1496                 mapping = vma->vm_file->f_mapping;
1497                 sequence = atomic_read(&mapping->truncate_count);
1498         }
1499         smp_rmb();  /* Prevent CPU from reordering lock-free ->nopage() */
1500 retry:
1501         new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
1502
1503         /* no page was available -- either SIGBUS or OOM */
1504         if (new_page == NOPAGE_SIGBUS)
1505                 return VM_FAULT_SIGBUS;
1506         if (new_page == NOPAGE_OOM)
1507                 return VM_FAULT_OOM;
1508
1509         /*
1510          * Should we do an early C-O-W break?
1511          */
1512         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1513                 struct page *page;
1514
1515                 if (unlikely(anon_vma_prepare(vma)))
1516                         goto oom;
1517                 page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1518                 if (!page)
1519                         goto oom;
1520                 copy_user_highpage(page, new_page, address);
1521                 page_cache_release(new_page);
1522                 new_page = page;
1523                 anon = 1;
1524         }
1525
1526         spin_lock(&mm->page_table_lock);
1527         /*
1528          * For a file-backed vma, someone could have truncated or otherwise
1529          * invalidated this page.  If unmap_mapping_range got called,
1530          * retry getting the page.
1531          */
1532         if (mapping &&
1533               (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
1534                 sequence = atomic_read(&mapping->truncate_count);
1535                 spin_unlock(&mm->page_table_lock);
1536                 page_cache_release(new_page);
1537                 goto retry;
1538         }
1539         page_table = pte_offset_map(pmd, address);
1540
1541         /*
1542          * This silly early PAGE_DIRTY setting removes a race
1543          * due to the bad i386 page protection. But it's valid
1544          * for other architectures too.
1545          *
1546          * Note that if write_access is true, we either now have
1547          * an exclusive copy of the page, or this is a shared mapping,
1548          * so we can make it writable and dirty to avoid having to
1549          * handle that later.
1550          */
1551         /* Only go through if we didn't race with anybody else... */
1552         if (pte_none(*page_table)) {
1553                 if (!PageReserved(new_page))
1554                         ++mm->rss;
1555                 flush_icache_page(vma, new_page);
1556                 entry = mk_pte(new_page, vma->vm_page_prot);
1557                 if (write_access)
1558                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1559                 set_pte(page_table, entry);
1560                 if (anon) {
1561                         lru_cache_add_active(new_page);
1562                         page_add_anon_rmap(new_page, vma, address);
1563                 } else
1564                         page_add_file_rmap(new_page);
1565                 pte_unmap(page_table);
1566         } else {
1567                 /* One of our sibling threads was faster, back out. */
1568                 pte_unmap(page_table);
1569                 page_cache_release(new_page);
1570                 spin_unlock(&mm->page_table_lock);
1571                 goto out;
1572         }
1573
1574         /* no need to invalidate: a not-present page shouldn't be cached */
1575         update_mmu_cache(vma, address, entry);
1576         spin_unlock(&mm->page_table_lock);
1577 out:
1578         return ret;
1579 oom:
1580         page_cache_release(new_page);
1581         ret = VM_FAULT_OOM;
1582         goto out;
1583 }
1584
1585 /*
1586  * Fault of a previously existing named mapping. Repopulate the pte
1587  * from the encoded file_pte if possible. This enables swappable
1588  * nonlinear vmas.
1589  */
1590 static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
1591         unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
1592 {
1593         unsigned long pgoff;
1594         int err;
1595
1596         BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
1597         /*
1598          * Fall back to the linear mapping if the fs does not support
1599          * ->populate:
1600          */
1601         if (!vma->vm_ops || !vma->vm_ops->populate || 
1602                         (write_access && !(vma->vm_flags & VM_SHARED))) {
1603                 pte_clear(pte);
1604                 return do_no_page(mm, vma, address, write_access, pte, pmd);
1605         }
1606
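        /* For a nonlinear vma the file offset is encoded in the pte itself. */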
1607         pgoff = pte_to_pgoff(*pte);
1608
1609         pte_unmap(pte);
1610         spin_unlock(&mm->page_table_lock);
1611
1612         err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
1613         if (err == -ENOMEM)
1614                 return VM_FAULT_OOM;
1615         if (err)
1616                 return VM_FAULT_SIGBUS;
1617         return VM_FAULT_MAJOR;
1618 }
1619
1620 /*
1621  * These routines also need to handle stuff like marking pages dirty
1622  * and/or accessed for architectures that don't do it in hardware (most
1623  * RISC architectures).  The early dirtying is also good on the i386.
1624  *
1625  * There is also a hook called "update_mmu_cache()" that architectures
1626  * with external mmu caches can use to update those (ie the Sparc or
1627  * PowerPC hashed page tables that act as extended TLBs).
1628  *
1629  * Note the "page_table_lock". It is to protect against kswapd removing
1630  * pages from under us. Note that kswapd only ever _removes_ pages, never
1631  * adds them. As such, once we have noticed that the page is not present,
1632  * we can drop the lock early.
1633  *
1634  * The adding of pages is protected by the MM semaphore (which we hold),
1635  * so we don't need to worry about a page suddenly being added into
1636  * our VM.
1637  *
1638  * We enter with the pagetable spinlock held, we are supposed to
1639  * release it when done.
1640  */
1641 static inline int handle_pte_fault(struct mm_struct *mm,
1642         struct vm_area_struct * vma, unsigned long address,
1643         int write_access, pte_t *pte, pmd_t *pmd)
1644 {
1645         pte_t entry;
1646
1647         entry = *pte;
1648         if (!pte_present(entry)) {
1649                 /*
1650                  * If it truly wasn't present, we know that kswapd
1651                  * and the PTE updates will not touch it later. So
1652                  * drop the lock.
1653                  */
1654                 if (pte_none(entry))
1655                         return do_no_page(mm, vma, address, write_access, pte, pmd);
1656                 if (pte_file(entry))
1657                         return do_file_page(mm, vma, address, write_access, pte, pmd);
1658                 return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
1659         }
1660
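        /*
         * The pte is present: this is either a write to a read-only pte
         * (possibly a COW break) or a software reference/dirty-bit update.
         */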
1661         if (write_access) {
1662                 if (!pte_write(entry))
1663                         return do_wp_page(mm, vma, address, pte, pmd, entry);
1664
1665                 entry = pte_mkdirty(entry);
1666         }
1667         entry = pte_mkyoung(entry);
1668         ptep_set_access_flags(vma, address, pte, entry, write_access);
1669         update_mmu_cache(vma, address, entry);
1670         pte_unmap(pte);
1671         spin_unlock(&mm->page_table_lock);
1672         return VM_FAULT_MINOR;
1673 }
1674
1675 /*
1676  * By the time we get here, we already hold the mm semaphore
1677  */
1678 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
1679         unsigned long address, int write_access)
1680 {
1681         pgd_t *pgd;
1682         pmd_t *pmd;
1683
1684         __set_current_state(TASK_RUNNING);
1685         pgd = pgd_offset(mm, address);
1686
1687         inc_page_state(pgfault);
1688
1689         if (is_vm_hugetlb_page(vma))
1690                 return VM_FAULT_SIGBUS; /* hugetlb is prefaulted; a fault here means truncation */
1691
1692         /*
1693          * We need the page table lock to synchronize with kswapd
1694          * and the SMP-safe atomic PTE updates.
1695          */
1696         spin_lock(&mm->page_table_lock);
1697         pmd = pmd_alloc(mm, pgd, address);
1698
1699         if (pmd) {
1700                 pte_t * pte = pte_alloc_map(mm, pmd, address);
1701                 if (pte)
1702                         return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
1703         }
1704         spin_unlock(&mm->page_table_lock);
1705         return VM_FAULT_OOM;
1706 }
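
/*
 * Sketch of how an architecture's page fault handler typically drives
 * handle_mm_fault() (illustrative only -- real arch code also handles
 * vma lookup failures, stack growth, access checks and signal delivery):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	...
 *	switch (handle_mm_fault(mm, vma, address, write)) {
 *	case VM_FAULT_MINOR:
 *		current->min_flt++;
 *		break;
 *	case VM_FAULT_MAJOR:
 *		current->maj_flt++;
 *		break;
 *	case VM_FAULT_SIGBUS:
 *		goto do_sigbus;
 *	default:
 *		goto out_of_memory;
 *	}
 *	up_read(&mm->mmap_sem);
 */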
1707
1708 /*
1709  * Allocate page middle directory.
1710  *
1711  * We've already handled the fast-path in-line, and we own the
1712  * page table lock.
1713  *
1714  * On a two-level page table, this ends up actually being entirely
1715  * optimized away.
1716  */
1717 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1718 {
1719         pmd_t *new;
1720
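        /* pmd_alloc_one() may sleep, so drop the page table lock around it. */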
1721         spin_unlock(&mm->page_table_lock);
1722         new = pmd_alloc_one(mm, address);
1723         spin_lock(&mm->page_table_lock);
1724         if (!new)
1725                 return NULL;
1726
1727         /*
1728          * Because we dropped the lock, we should re-check the
1729          * entry, as somebody else could have populated it..
1730          */
1731         if (pgd_present(*pgd)) {
1732                 pmd_free(new);
1733                 goto out;
1734         }
1735         pgd_populate(mm, pgd, new);
1736 out:
1737         return pmd_offset(pgd, address);
1738 }
1739
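/*
 * Fault in (and so populate) every page in the range [addr, end) of the
 * vma containing addr, by way of get_user_pages().  The range must lie
 * within a single vma.  Returns 0 on success, a negative errno if
 * get_user_pages() fails outright, or -1 if fewer pages were faulted in
 * than requested.
 */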
1740 int make_pages_present(unsigned long addr, unsigned long end)
1741 {
1742         int ret, len, write;
1743         struct vm_area_struct * vma;
1744
1745         vma = find_vma(current->mm, addr);
1746         write = (vma->vm_flags & VM_WRITE) != 0;
1747         if (addr >= end)
1748                 BUG();
1749         if (end > vma->vm_end)
1750                 BUG();
1751         len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
1752         ret = get_user_pages(current, current->mm, addr,
1753                         len, write, 0, NULL, NULL);
1754         if (ret < 0)
1755                 return ret;
1756         return ret == len ? 0 : -1;
1757 }
1758
1759 /* 
1760  * Map a vmalloc()-space virtual address to the physical page.
1761  */
1762 struct page * vmalloc_to_page(void * vmalloc_addr)
1763 {
1764         unsigned long addr = (unsigned long) vmalloc_addr;
1765         struct page *page = NULL;
1766         pgd_t *pgd = pgd_offset_k(addr);
1767         pmd_t *pmd;
1768         pte_t *ptep, pte;
1769   
1770         if (!pgd_none(*pgd)) {
1771                 pmd = pmd_offset(pgd, addr);
1772                 if (!pmd_none(*pmd)) {
1773                         preempt_disable();
1774                         ptep = pte_offset_map(pmd, addr);
1775                         pte = *ptep;
1776                         if (pte_present(pte))
1777                                 page = pte_page(pte);
1778                         pte_unmap(ptep);
1779                         preempt_enable();
1780                 }
1781         }
1782         return page;
1783 }
1784
1785 EXPORT_SYMBOL(vmalloc_to_page);
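
/*
 * Typical use (a sketch, not from this file): drivers that need struct
 * page pointers for a vmalloc()ed buffer, e.g. to build a scatterlist,
 * walk the buffer one page at a time:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);
 *		...
 *	}
 */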
1786
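/*
 * Gate area: the fixed, kernel-supplied page (typically the vsyscall/vDSO
 * page on architectures that define AT_SYSINFO_EHDR) which is visible to
 * every process but is not described by any vma on the mm's list, so it
 * needs the special-case vma below.
 */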
1787 #if !defined(CONFIG_ARCH_GATE_AREA)
1788
1789 #if defined(AT_SYSINFO_EHDR)
1790 struct vm_area_struct gate_vma;
1791
1792 static int __init gate_vma_init(void)
1793 {
1794         gate_vma.vm_mm = NULL;
1795         gate_vma.vm_start = FIXADDR_USER_START;
1796         gate_vma.vm_end = FIXADDR_USER_END;
1797         gate_vma.vm_page_prot = PAGE_READONLY;
1798         gate_vma.vm_flags = 0;
1799         return 0;
1800 }
1801 __initcall(gate_vma_init);
1802 #endif
1803
1804 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
1805 {
1806 #ifdef AT_SYSINFO_EHDR
1807         return &gate_vma;
1808 #else
1809         return NULL;
1810 #endif
1811 }
1812
1813 int in_gate_area(struct task_struct *task, unsigned long addr)
1814 {
1815 #ifdef AT_SYSINFO_EHDR
1816         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
1817                 return 1;
1818 #endif
1819         return 0;
1820 }
1821
1822 #endif