/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static spinlock_cacheline_t kmap_lock_cacheline = {SPIN_LOCK_UNLOCKED};
#define kmap_lock  kmap_lock_cacheline.lock

pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
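
/*
 * Illustrative lifecycle of a single pkmap slot (a sketch of the
 * accounting described above, not a literal trace):
 *
 *	pkmap_count[n] == 0	slot unused, PTE clear since the last TLB flush
 *	kmap_high(page)		maps the page, count becomes 2 (mapped + 1 user)
 *	kunmap_high(page)	count drops to 1: no users, but still mapped
 *	flush_all_zero_pkmaps()	clears the PTE, count back to 0, slot reusable
 */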
static void flush_all_zero_pkmaps(void)
{
        int i;

        flush_cache_all();
        for (i = 0; i < LAST_PKMAP; i++) {
                struct page *page;
                /*
                 * zero means we don't have anything to do,
                 * >1 means that it is still in use. Only
                 * a count of 1 means that it is free but
                 * needs to be unmapped
                 */
                if (pkmap_count[i] != 1)
                        continue;
                pkmap_count[i] = 0;
                /* sanity check */
                if (pte_none(pkmap_page_table[i]))
                        BUG();
                /*
                 * Don't need an atomic fetch-and-clear op here;
                 * no-one has the page mapped, and cannot get at
                 * its virtual address (and hence PTE) without first
                 * getting the kmap_lock (which is held here).
                 * So no dangers, even with speculative execution.
                 */
                page = pte_page(pkmap_page_table[i]);
                pte_clear(&pkmap_page_table[i]);
                page->virtual = NULL;
        }
        flush_tlb_all();
}
static inline unsigned long map_new_virtual(struct page *page)
{
        unsigned long vaddr;
        int count;
start:
        count = LAST_PKMAP;
        /* Find an empty entry */
        for (;;) {
                last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                if (!last_pkmap_nr) {
                        flush_all_zero_pkmaps();
                        count = LAST_PKMAP;
                }
                if (!pkmap_count[last_pkmap_nr])
                        break;  /* Found a usable entry */
                if (--count)
                        continue;
                /* Sleep for somebody else to unmap their entries */
                {
                        DECLARE_WAITQUEUE(wait, current);

                        current->state = TASK_UNINTERRUPTIBLE;
                        add_wait_queue(&pkmap_map_wait, &wait);
                        spin_unlock(&kmap_lock);
                        schedule();
                        remove_wait_queue(&pkmap_map_wait, &wait);
                        spin_lock(&kmap_lock);
                        /* Somebody else might have mapped it while we slept */
                        if (page->virtual)
                                return (unsigned long) page->virtual;
                        goto start;
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
        set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
        pkmap_count[last_pkmap_nr] = 1;
        page->virtual = (void *) vaddr;
        return vaddr;
}
void *kmap_high(struct page *page)
{
        unsigned long vaddr;

        /*
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         *
         * We cannot call this from interrupts, as it may block
         */
        spin_lock(&kmap_lock);
        vaddr = (unsigned long) page->virtual;
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        if (pkmap_count[PKMAP_NR(vaddr)] < 2)
                BUG();
        spin_unlock(&kmap_lock);
        return (void*) vaddr;
}
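
/*
 * Typical use is via the kmap()/kunmap() wrappers from <asm/highmem.h>,
 * which call kmap_high()/kunmap_high() for highmem pages (an
 * illustrative sketch; may sleep, so never from interrupt context):
 *
 *	char *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */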
void kunmap_high(struct page *page)
{
        unsigned long vaddr;
        unsigned long nr;
        int need_wakeup;

        spin_lock(&kmap_lock);
        vaddr = (unsigned long) page->virtual;
        if (!vaddr)
                BUG();
        nr = PKMAP_NR(vaddr);
        /*
         * A count must never go down to zero
         * without a TLB flush!
         */
        need_wakeup = 0;
        switch (--pkmap_count[nr]) {
        case 0:
                BUG();
        case 1:
                /*
                 * Avoid an unnecessary wake_up() function call.
                 * The common case is pkmap_count[] == 1, but
                 * no waiters.
                 * The tasks queued in the wait-queue are guarded
                 * by both the lock in the wait-queue-head and by
                 * the kmap_lock.  As the kmap_lock is held here,
                 * no need for the wait-queue-head's lock.  Simply
                 * test if the queue is empty.
                 */
                need_wakeup = waitqueue_active(&pkmap_map_wait);
        }
        spin_unlock(&kmap_lock);

        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)
                wake_up(&pkmap_map_wait);
}
/*
 * This lock gets no contention at all, normally.
 */
static spinlock_t emergency_lock = SPIN_LOCK_UNLOCKED;

int nr_emergency_pages;
static LIST_HEAD(emergency_pages);

int nr_emergency_bhs;
static LIST_HEAD(emergency_bhs);
/*
 * Simple bounce buffer support for highmem pages.
 * This will be moved to the block layer in 2.5.
 */
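
/*
 * In outline: when a buffer_head targets a highmem page that low-level
 * drivers cannot address directly, create_bounce() below substitutes a
 * lowmem "bounce" page.  For WRITEs the data is copied down before the
 * I/O is submitted; for READs it is copied back up in the completion
 * handler.  A small emergency pool of pages and buffer_heads keeps
 * bounce I/O going when the allocator runs dry.
 */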
static inline void copy_from_high_bh (struct buffer_head *to,
                         struct buffer_head *from)
{
        struct page *p_from;
        char *vfrom;

        p_from = from->b_page;
        vfrom = kmap_atomic(p_from, KM_USER0);
        memcpy(to->b_data, vfrom + bh_offset(from), to->b_size);
        kunmap_atomic(vfrom, KM_USER0);
}
static inline void copy_to_high_bh_irq (struct buffer_head *to,
                         struct buffer_head *from)
{
        struct page *p_to;
        char *vto;
        unsigned long flags;

        p_to = to->b_page;
        __save_flags(flags);
        __cli();
        vto = kmap_atomic(p_to, KM_BOUNCE_READ);
        memcpy(vto + bh_offset(to), from->b_data, to->b_size);
        kunmap_atomic(vto, KM_BOUNCE_READ);
        __restore_flags(flags);
}
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
        struct page *page;
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
        unsigned long flags;

        bh_orig->b_end_io(bh_orig, uptodate);
        page = bh->b_page;

        spin_lock_irqsave(&emergency_lock, flags);
        if (nr_emergency_pages >= POOL_SIZE)
                __free_page(page);
        else {
                /*
                 * We are abusing page->list to manage
                 * the highmem emergency pool:
                 */
                list_add(&page->list, &emergency_pages);
                nr_emergency_pages++;
        }
        if (nr_emergency_bhs >= POOL_SIZE) {
#ifdef HIGHMEM_DEBUG
                /* Don't clobber the constructed slab cache */
                init_waitqueue_head(&bh->b_wait);
#endif
                kmem_cache_free(bh_cachep, bh);
        } else {
                /*
                 * Ditto in the bh case, here we abuse b_inode_buffers:
                 */
                list_add(&bh->b_inode_buffers, &emergency_bhs);
                nr_emergency_bhs++;
        }
        spin_unlock_irqrestore(&emergency_lock, flags);
}
static __init int init_emergency_pool(void)
{
        struct sysinfo i;

        si_meminfo(&i);
        si_swapinfo(&i);
        if (!i.totalhigh)
                return 0;

        spin_lock_irq(&emergency_lock);
        while (nr_emergency_pages < POOL_SIZE) {
                struct page * page = alloc_page(GFP_ATOMIC);
                if (!page) {
                        printk("couldn't refill highmem emergency pages");
                        break;
                }
                list_add(&page->list, &emergency_pages);
                nr_emergency_pages++;
        }
        while (nr_emergency_bhs < POOL_SIZE) {
                struct buffer_head * bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
                if (!bh) {
                        printk("couldn't refill highmem emergency bhs");
                        break;
                }
                list_add(&bh->b_inode_buffers, &emergency_bhs);
                nr_emergency_bhs++;
        }
        spin_unlock_irq(&emergency_lock);
        printk("allocated %d pages and %d bhs reserved for the highmem bounces\n",
                nr_emergency_pages, nr_emergency_bhs);
        return 0;
}

__initcall(init_emergency_pool);
static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
        bounce_end_io(bh, uptodate);
}

static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
{
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);

        if (uptodate)
                copy_to_high_bh_irq(bh_orig, bh);
        bounce_end_io(bh, uptodate);
}
struct page *alloc_bounce_page (void)
{
        struct list_head *tmp;
        struct page *page;

        page = alloc_page(GFP_NOHIGHIO);
        if (page)
                return page;
        /*
         * No luck. First, kick the VM so it doesn't idle around while
         * we are using up our emergency rations.
         */
        wakeup_bdflush();

repeat_alloc:
        /*
         * Try to allocate from the emergency pool.
         */
        tmp = &emergency_pages;
        spin_lock_irq(&emergency_lock);
        if (!list_empty(tmp)) {
                page = list_entry(tmp->next, struct page, list);
                list_del(tmp->next);
                nr_emergency_pages--;
        }
        spin_unlock_irq(&emergency_lock);
        if (page)
                return page;
        /* we need to wait I/O completion */
        run_task_queue(&tq_disk);
        current->policy |= SCHED_YIELD;
        __set_current_state(TASK_RUNNING);
        schedule();
        goto repeat_alloc;
}
struct buffer_head *alloc_bounce_bh (void)
{
        struct list_head *tmp;
        struct buffer_head *bh;

        bh = kmem_cache_alloc(bh_cachep, SLAB_NOHIGHIO);
        if (bh)
                return bh;
        /*
         * No luck. First, kick the VM so it doesn't idle around while
         * we are using up our emergency rations.
         */
        wakeup_bdflush();

repeat_alloc:
        /*
         * Try to allocate from the emergency pool.
         */
        tmp = &emergency_bhs;
        spin_lock_irq(&emergency_lock);
        if (!list_empty(tmp)) {
                bh = list_entry(tmp->next, struct buffer_head, b_inode_buffers);
                list_del(tmp->next);
                nr_emergency_bhs--;
        }
        spin_unlock_irq(&emergency_lock);
        if (bh)
                return bh;
        /* we need to wait I/O completion */
        run_task_queue(&tq_disk);
        current->policy |= SCHED_YIELD;
        __set_current_state(TASK_RUNNING);
        schedule();
        goto repeat_alloc;
}
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
        struct page *page;
        struct buffer_head *bh;

        if (!PageHighMem(bh_orig->b_page))
                return bh_orig;

        bh = alloc_bounce_bh();
        /*
         * This is wasteful for 1k buffers, but this is a stopgap measure
         * and we are being ineffective anyway. This approach simplifies
         * things immensely. On boxes with more than 4GB RAM this should
         * not be an issue anyway.
         */
        page = alloc_bounce_page();

        set_bh_page(bh, page, 0);

        bh->b_blocknr = bh_orig->b_blocknr;
        bh->b_size = bh_orig->b_size;
        bh->b_dev = bh_orig->b_dev;
        bh->b_count = bh_orig->b_count;
        bh->b_rdev = bh_orig->b_rdev;
        bh->b_state = bh_orig->b_state;
#ifdef HIGHMEM_DEBUG
        bh->b_flushtime = jiffies;
        bh->b_next_free = NULL;
        bh->b_prev_free = NULL;
        /* bh->b_this_page */
        bh->b_reqnext = NULL;
#endif
        if (rw == WRITE) {
                bh->b_end_io = bounce_end_io_write;
                copy_from_high_bh(bh, bh_orig);
        } else
                bh->b_end_io = bounce_end_io_read;
        bh->b_private = (void *)bh_orig;
        bh->b_rsector = bh_orig->b_rsector;
#ifdef HIGHMEM_DEBUG
        memset(&bh->b_wait, -1, sizeof(bh->b_wait));
#endif

        return bh;
}
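
/*
 * Typical use (an illustrative sketch, not a verbatim call site): the 2.4
 * block layer swaps in the bounce buffer just before a request is queued:
 *
 *	bh = create_bounce(rw, bh);	returns bh_orig untouched for lowmem pages
 *
 * From then on the driver only sees the lowmem bh; the completion handlers
 * installed above copy the data back up for READs and recycle the bounce
 * page and buffer_head into the emergency pool.
 */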