X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=fs%2Fbio.c;h=093345f00128b841cd7a4026378f1021e2e9785d;hb=e6a0e9cdff79e1406e5653f759aaf9f59b7ce4c8;hp=f95c8749499f9db7fe799594161a183ed9896910;hpb=c45aa055c32b488fc3fd73c760df372b09acf69a;p=powerpc.git

diff --git a/fs/bio.c b/fs/bio.c
index f95c874949..093345f001 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -28,9 +28,9 @@
 #include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-#define BIO_POOL_SIZE 256
+#define BIO_POOL_SIZE 2
 
-static kmem_cache_t *bio_slab __read_mostly;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -38,13 +38,13 @@ static kmem_cache_t *bio_slab __read_mostly;
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
  */
-#define BIO_SPLIT_ENTRIES 8
+#define BIO_SPLIT_ENTRIES 2
 mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
 	int nr_vecs;
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
@@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
@@ -622,10 +620,9 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 		nr_pages += end - start;
 		/*
-		 * transfer and buffer must be aligned to at least hardsector
-		 * size for now, in the future we can relax this restriction
+		 * buffer must be aligned to at least hardsector size for now
 		 */
-		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		if (uaddr & queue_dma_alignment(q))
 			return ERR_PTR(-EINVAL);
 	}
 
@@ -751,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 			     int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -766,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
@@ -931,7 +916,7 @@ void bio_set_pages_dirty(struct bio *bio)
 	}
 }
 
-static void bio_release_pages(struct bio *bio)
+void bio_release_pages(struct bio *bio)
 {
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int i;
@@ -955,16 +940,16 @@
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
@@ -1135,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
  * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
 	int i;
 
@@ -1143,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 		struct biovec_slab *bp = bvec_slabs + i;
 		mempool_t **bvp = bs->bvec_pools + i;
 
-		if (pool_entries > 1 && i >= scale)
-			pool_entries >>= 1;
-
 		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
@@ -1176,7 +1158,7 @@ void bioset_free(struct bio_set *bs)
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 {
 	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 
@@ -1187,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+	if (!biovec_create_pools(bs, bvec_pool_size))
 		return bs;
 
 bad:
@@ -1211,38 +1193,11 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	int megabytes, bvec_pool_entries;
-	int scale = BIOVEC_NR_POOLS;
-
-	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
-				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	biovec_init_slabs();
 
-	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
-	/*
-	 * find out where to start scaling
-	 */
-	if (megabytes <= 16)
-		scale = 0;
-	else if (megabytes <= 32)
-		scale = 1;
-	else if (megabytes <= 64)
-		scale = 2;
-	else if (megabytes <= 96)
-		scale = 3;
-	else if (megabytes <= 128)
-		scale = 4;
-
-	/*
-	 * Limit number of entries reserved -- mempools are only used when
-	 * the system is completely unable to allocate memory, so we only
-	 * need enough to make progress.
-	 */
-	bvec_pool_entries = 1 + scale;
-
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
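
A note on the init_bio() hunk, appended after the diff so the patch body above stays intact: the KMEM_CACHE() macro derives the cache name, object size, and alignment from the struct type itself. As a rough sketch, assuming the macro definition of this era (when kmem_cache_create() still took constructor and destructor arguments, as the removed call shows), the new line expands to approximately:

	/* approximate expansion of KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC) */
	bio_slab = kmem_cache_create("bio", sizeof(struct bio),
				     __alignof__(struct bio),
				     SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				     NULL, NULL);

The substantive difference from the removed call is that the cache is created with the natural alignment of struct bio rather than an alignment of 0.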
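The bio_dirty_fn() conversion reflects the 2.6.20 workqueue rework: work callbacks now receive the work item itself rather than an opaque void *data cookie, and DECLARE_WORK()/INIT_WORK() lose their data argument. bio_dirty_fn() ignores its argument (it pulls bios off the global bio_dirty_list), but callbacks that do need per-object state recover it with container_of(). A minimal sketch of that pattern, with hypothetical names (my_dev, my_dev_work_fn, counter) not taken from this patch:

	#include <linux/workqueue.h>

	struct my_dev {
		int counter;			/* hypothetical per-device state */
		struct work_struct work;	/* work item embedded in the object */
	};

	/* new-style callback: takes the work item, not a void *data cookie */
	static void my_dev_work_fn(struct work_struct *work)
	{
		/* recover the enclosing object from its embedded work item */
		struct my_dev *dev = container_of(work, struct my_dev, work);

		dev->counter++;
	}

	static void my_dev_setup(struct my_dev *dev)
	{
		INIT_WORK(&dev->work, my_dev_work_fn);	/* no third (data) argument */
	}

Because bio_dirty_work needs no per-object state, the patch simply drops the former NULL data argument from DECLARE_WORK() instead of using this container_of() idiom.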