/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
9 #include <linux/config.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
19 #include <linux/swap.h>
20 #include <linux/init.h>
21 #include <linux/bootmem.h> /* max_low_pfn */
22 #include <linux/vmalloc.h>
23 #ifdef CONFIG_BLK_DEV_INITRD
24 #include <linux/blk.h>
27 #include <asm/system.h>
28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <asm/hwrpb.h>
33 #include <asm/mmu_context.h>
34 #include <asm/console.h>
37 mmu_gather_t mmu_gathers[NR_CPUS];
39 unsigned long totalram_pages;
41 extern void die_if_kernel(char *,struct pt_regs *,long);
43 struct thread_struct original_pcb;
46 struct pgtable_cache_struct quicklists;
54 ret = (pgd_t *)__get_free_page(GFP_KERNEL);
55 init = pgd_offset(&init_mm, 0UL);
58 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
59 memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
60 (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
62 pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
65 /* The last PGD entry is the VPTB self-map. */
66 pgd_val(ret[PTRS_PER_PGD-1])
67 = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
72 int do_check_pgt_cache(int low, int high)
75 if(pgtable_cache_size > high) {
78 free_pgd_slow(get_pgd_fast());
82 pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
86 pte_free_slow(pte_alloc_one_fast(NULL, 0));
89 } while(pgtable_cache_size > low);
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
108 __bad_pagetable(void)
110 memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
111 return (pmd_t *) EMPTY_PGT;
117 memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
118 return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
121 #ifndef CONFIG_DISCONTIGMEM
125 long i,free = 0,total = 0,reserved = 0;
126 long shared = 0, cached = 0;
128 printk("\nMem-info:\n");
130 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
134 if (PageReserved(mem_map+i))
136 else if (PageSwapCache(mem_map+i))
138 else if (!page_count(mem_map+i))
141 shared += atomic_read(&mem_map[i].count) - 1;
143 printk("%ld pages of RAM\n",total);
144 printk("%ld free pages\n",free);
145 printk("%ld reserved pages\n",reserved);
146 printk("%ld pages shared\n",shared);
147 printk("%ld pages swap cached\n",cached);
148 printk("%ld pages in page table cache\n",pgtable_cache_size);
153 static inline unsigned long
154 load_PCB(struct thread_struct * pcb)
156 register unsigned long sp __asm__("$30");
158 return __reload_thread(pcb);
161 /* Set up initial PCB, VPTB, and other such nicities. */
164 switch_to_system_map(void)
166 unsigned long newptbr;
167 unsigned long original_pcb_ptr;
169 /* Initialize the kernel's page tables. Linux puts the vptb in
170 the last slot of the L1 page table. */
171 memset(swapper_pg_dir, 0, PAGE_SIZE);
172 newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
173 pgd_val(swapper_pg_dir[1023]) =
174 (newptbr << 32) | pgprot_val(PAGE_KERNEL);
176 /* Set the vptb. This is often done by the bootloader, but
177 shouldn't be required. */
178 if (hwrpb->vptb != 0xfffffffe00000000) {
179 wrvptptr(0xfffffffe00000000);
180 hwrpb->vptb = 0xfffffffe00000000;
181 hwrpb_update_checksum(hwrpb);
184 /* Also set up the real kernel PCB while we're at it. */
185 init_task.thread.ptbr = newptbr;
186 init_task.thread.pal_flags = 1; /* set FEN, clear everything else */
187 init_task.thread.flags = 0;
188 original_pcb_ptr = load_PCB(&init_task.thread);
191 /* Save off the contents of the original PCB so that we can
192 restore the original console's page tables for a clean reboot.
194 Note that the PCB is supposed to be a physical address, but
195 since KSEG values also happen to work, folks get confused.
198 if (original_pcb_ptr < PAGE_OFFSET) {
199 original_pcb_ptr = (unsigned long)
200 phys_to_virt(original_pcb_ptr);
202 original_pcb = *(struct thread_struct *) original_pcb_ptr;
205 int callback_init_done;
208 callback_init(void * kernel_end)
210 struct crb_struct * crb;
215 /* Starting at the HWRPB, locate the CRB. */
216 crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);
218 if (alpha_using_srm) {
219 /* Tell the console whither it is to be remapped. */
220 if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
221 __halt(); /* "We're boned." --Bender */
223 /* Edit the procedure descriptors for DISPATCH and FIXUP. */
224 crb->dispatch_va = (struct procdesc_struct *)
225 (VMALLOC_START + (unsigned long)crb->dispatch_va
227 crb->fixup_va = (struct procdesc_struct *)
228 (VMALLOC_START + (unsigned long)crb->fixup_va
232 switch_to_system_map();
234 /* Allocate one PGD and one PMD. In the case of SRM, we'll need
235 these to actually remap the console. There is an assumption
236 here that only one of each is needed, and this allows for 8MB.
237 On systems with larger consoles, additional pages will be
238 allocated as needed during the mapping process.
240 In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC,
241 we need to allocate the PGD we use for vmalloc before we start
242 forking other tasks. */
245 (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
246 kernel_end = two_pages + 2*PAGE_SIZE;
247 memset(two_pages, 0, 2*PAGE_SIZE);
249 pgd = pgd_offset_k(VMALLOC_START);
250 pgd_set(pgd, (pmd_t *)two_pages);
251 pmd = pmd_offset(pgd, VMALLOC_START);
252 pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
254 if (alpha_using_srm) {
255 static struct vm_struct console_remap_vm;
256 unsigned long vaddr = VMALLOC_START;
259 /* Set up the third level PTEs and update the virtual
260 addresses of the CRB entries. */
261 for (i = 0; i < crb->map_entries; ++i) {
262 unsigned long paddr = crb->map[i].pa;
263 crb->map[i].va = vaddr;
264 for (j = 0; j < crb->map[i].count; ++j) {
265 /* Newer console's (especially on larger
266 systems) may require more pages of
267 PTEs. Grab additional pages as needed. */
268 if (pmd != pmd_offset(pgd, vaddr)) {
269 memset(kernel_end, 0, PAGE_SIZE);
270 pmd = pmd_offset(pgd, vaddr);
271 pmd_set(pmd, (pte_t *)kernel_end);
272 kernel_end += PAGE_SIZE;
274 set_pte(pte_offset(pmd, vaddr),
275 mk_pte_phys(paddr, PAGE_KERNEL));
281 /* Let vmalloc know that we've allocated some space. */
282 console_remap_vm.flags = VM_ALLOC;
283 console_remap_vm.addr = VMALLOC_START;
284 console_remap_vm.size = vaddr - VMALLOC_START;
285 vmlist = &console_remap_vm;
288 callback_init_done = 1;
293 #ifndef CONFIG_DISCONTIGMEM
295 * paging_init() sets up the memory map.
300 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
301 unsigned long dma_pfn, high_pfn;
303 dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
304 high_pfn = max_low_pfn;
306 if (dma_pfn >= high_pfn)
307 zones_size[ZONE_DMA] = high_pfn;
309 zones_size[ZONE_DMA] = dma_pfn;
310 zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
313 /* Initialize mem_map[]. */
314 free_area_init(zones_size);
316 /* Initialize the kernel's ZERO_PGE. */
317 memset((void *)ZERO_PGE, 0, PAGE_SIZE);
319 #endif /* CONFIG_DISCONTIGMEM */
321 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
323 srm_paging_stop (void)
325 /* Move the vptb back to where the SRM console expects it. */
326 swapper_pg_dir[1] = swapper_pg_dir[1023];
328 wrvptptr(0x200000000);
329 hwrpb->vptb = 0x200000000;
330 hwrpb_update_checksum(hwrpb);
332 /* Reload the page tables that the console had in use. */
333 load_PCB(&original_pcb);
338 #ifndef CONFIG_DISCONTIGMEM
340 printk_memory_info(void)
342 unsigned long codesize, reservedpages, datasize, initsize, tmp;
343 extern int page_is_ram(unsigned long) __init;
344 extern char _text, _etext, _data, _edata;
345 extern char __init_begin, __init_end;
347 /* printk all informations */
349 for (tmp = 0; tmp < max_low_pfn; tmp++)
351 * Only count reserved RAM pages
353 if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
356 codesize = (unsigned long) &_etext - (unsigned long) &_text;
357 datasize = (unsigned long) &_edata - (unsigned long) &_data;
358 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
360 printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
361 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
362 max_mapnr << (PAGE_SHIFT-10),
364 reservedpages << (PAGE_SHIFT-10),
372 max_mapnr = num_physpages = max_low_pfn;
373 totalram_pages += free_all_bootmem();
374 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
376 printk_memory_info();
378 #endif /* CONFIG_DISCONTIGMEM */
381 free_reserved_mem(void *start, void *end)
383 void *__start = start;
384 for (; __start < end; __start += PAGE_SIZE) {
385 ClearPageReserved(virt_to_page(__start));
386 set_page_count(virt_to_page(__start), 1);
387 free_page((long)__start);
395 extern char __init_begin, __init_end;
397 free_reserved_mem(&__init_begin, &__init_end);
398 printk (KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
399 (&__init_end - &__init_begin) >> 10);
402 #ifdef CONFIG_BLK_DEV_INITRD
404 free_initrd_mem(unsigned long start, unsigned long end)
406 free_reserved_mem((void *)start, (void *)end);
407 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
408 (end - start) >> 10);
413 si_meminfo(struct sysinfo *val)
415 val->totalram = totalram_pages;
417 val->freeram = nr_free_pages();
418 val->bufferram = atomic_read(&buffermem_pages);
421 val->mem_unit = PAGE_SIZE;