X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;ds=sidebyside;f=mm%2Fhighmem.c;h=9b2a5403c447a7bd0f147700a0db48df5b0b58f4;hb=00ab956f2fefd3673edc16df55beed21834b7bdd;hp=ce2e7e8bbfa7102a78473d1a47c24a41f7788b83;hpb=8a212ab6b8a4ccc6f3c3d1beba5f92655c576404;p=powerpc.git

diff --git a/mm/highmem.c b/mm/highmem.c
index ce2e7e8bbf..9b2a5403c4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,18 +26,14 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	return alloc_page(gfp_mask | GFP_DMA);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -50,11 +46,6 @@ static void page_pool_free(void *page, void *data)
  */
 #ifdef CONFIG_HIGHMEM
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -83,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -167,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -183,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -228,9 +216,8 @@ static __init int init_emergency_pool(void)
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
-	if (!page_pool)
-		BUG();
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
+	BUG_ON(!page_pool);
 
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 	return 0;
@@ -271,9 +258,9 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
-	if (!isa_page_pool)
-		BUG();
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
@@ -328,15 +315,15 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
 
-		mempool_free(bvec->bv_page, pool);
-		dec_page_state(nr_bounce);
+		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+		mempool_free(bvec->bv_page, pool);
 	}
 
 	bio_endio(bio_orig, bio_orig->bi_size, err);
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
@@ -383,7 +370,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
@@ -410,7 +397,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;
-		inc_page_state(nr_bounce);
+		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
@@ -483,6 +470,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		pool = isa_page_pool;
 	}
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
 	/*
 	 * slow path
 	 */
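
Note (not part of the patch): a minimal sketch of the two mempool construction patterns this diff switches between, using only the helpers the patch itself calls (mempool_create_page_pool(), mempool_alloc_pages(), mempool_free_pages()). The example_* names and EXAMPLE_POOL_SIZE are hypothetical. The highmem pool can use the generic order-0 page pool helper directly; the ISA pool keeps a thin allocation wrapper only because it must OR in GFP_DMA, while freeing is zone-agnostic, so mempool_free_pages() is reused with the page order passed as pool_data.

#include <linux/mempool.h>
#include <linux/gfp.h>

#define EXAMPLE_POOL_SIZE	64	/* hypothetical pool size */

/* Highmem-style pool: the generic helper builds a pool of order-0 pages. */
static mempool_t *example_create_page_pool(void)
{
	return mempool_create_page_pool(EXAMPLE_POOL_SIZE, 0);
}

/*
 * ISA-style pool: the allocation must be constrained to the DMA zone,
 * so wrap mempool_alloc_pages() to add GFP_DMA.  pool_data carries the
 * page order (0 here), which lets mempool_free_pages() free the pages.
 */
static void *example_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

static mempool_t *example_create_isa_pool(void)
{
	return mempool_create(EXAMPLE_POOL_SIZE, example_alloc_pages_isa,
			      mempool_free_pages, (void *) 0);
}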