diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a1086ee8cc..339b575ce0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@ struct crypt_io {
 	struct dm_target *target;
 	struct bio *base_bio;
-	struct bio *first_clone;
 	struct work_struct work;
 	atomic_t pending;
 	int error;
 	int post_process;
@@ -107,6 +106,8 @@ struct crypt_config {
 
 static struct kmem_cache *_crypt_io_pool;
 
+static void clone_init(struct crypt_io *, struct bio *);
+
 /*
  * Different IV generation algorithms:
  *
@@ -220,7 +221,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
 	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
-	int log = long_log2(bs);
+	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
 	 * to get the cipher block count, we use this shift in _gen */
@@ -378,25 +379,20 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
-		   struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
+				      unsigned int *bio_vec_idx)
 {
+	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned int i;
 
-	if (base_bio) {
-		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
-		__bio_clone(clone, base_bio);
-	} else
-		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;
 
-	clone->bi_destructor = dm_crypt_bio_destructor;
+	clone_init(io, clone);
 
 	/* if the last bio was not complete, continue where that one ended */
 	clone->bi_idx = *bio_vec_idx;
@@ -495,9 +491,6 @@ static void dec_pending(struct crypt_io *io, int error)
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	if (io->first_clone)
-		bio_put(io->first_clone);
-
 	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
 
 	mempool_free(io, cc->io_pool);
@@ -562,6 +555,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_end_io  = crypt_endio;
 	clone->bi_bdev    = cc->dev->bdev;
 	clone->bi_rw      = io->base_bio->bi_rw;
+	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static void process_read(struct crypt_io *io)
@@ -585,7 +579,6 @@ static void process_read(struct crypt_io *io)
 	}
 
 	clone_init(io, clone);
-	clone->bi_destructor = dm_crypt_bio_destructor;
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
 	clone->bi_size = base_bio->bi_size;
@@ -615,8 +608,7 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
-					   io->first_clone, &bvec_idx);
+		clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
 			return;
@@ -631,31 +623,23 @@ static void process_write(struct crypt_io *io)
 			return;
 		}
 
-		clone_init(io, clone);
 		clone->bi_sector = cc->start + sector;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
-
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);
 
-		/* prevent bio_put of first_clone */
+		/* Grab another reference to the io struct
+		 * before we kick off the request */
 		if (remaining)
 			atomic_inc(&io->pending);
 
 		generic_make_request(clone);
 
+		/* Do not reference clone after this - it
+		 * may be gone already. */
+
 		/* out of memory -> run queues */
 		if (remaining)
-			congestion_wait(bio_data_dir(clone), HZ/100);
+			congestion_wait(WRITE, HZ/100);
 	}
 }
 
@@ -867,7 +851,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad4;
 	}
 
-	cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
+	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
 	if (!cc->bs) {
 		ti->error = "Cannot allocate crypt bioset";
 		goto bad_bs;
@@ -954,15 +938,17 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	struct crypt_config *cc = ti->private;
 	struct crypt_io *io;
 
+	if (bio_barrier(bio))
+		return -EOPNOTSUPP;
+
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->first_clone = NULL;
 	io->error = io->post_process = 0;
 	atomic_set(&io->pending, 0);
 	kcryptd_queue_io(io);
 
-	return 0;
+	return DM_MAPIO_SUBMITTED;
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
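
Note on the write path above: the patch drops the old first_clone bookkeeping in
favor of plain reference counting on the parent crypt_io. Each clone in flight is
accounted for in io->pending, the extra reference is taken before
generic_make_request(), and the clone is never touched afterwards because it may
already have completed and been freed. The patch takes the extra reference only
when another clone will follow; the sketch below uses the more common
submitter-holds-one-reference form of the same idea. It is a minimal userspace
sketch, not kernel code: fake_io and put_io are hypothetical stand-ins, and C11
<stdatomic.h> replaces the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>

struct fake_io {
	atomic_int pending;	/* one reference per in-flight clone */
	int error;
};

static void put_io(struct fake_io *io, int error)
{
	if (error)
		io->error = error;
	/* like atomic_dec_and_test(): only the final put completes the io */
	if (atomic_fetch_sub(&io->pending, 1) != 1)
		return;
	printf("all clones finished, error=%d\n", io->error);
}

int main(void)
{
	struct fake_io io = { .error = 0 };

	atomic_store(&io.pending, 1);	/* submitter holds the initial ref */

	for (int i = 0; i < 3; i++) {
		/* take the reference BEFORE submitting; once submitted,
		 * the clone and its completion are out of our hands */
		atomic_fetch_add(&io.pending, 1);
		/* generic_make_request(clone) would go here;
		 * simulate the clone completing immediately: */
		put_io(&io, 0);
	}

	put_io(&io, 0);		/* drop the submitter's reference */
	return 0;
}

The ordering is the point: the reference is taken before the request is kicked
off, so a completion that runs immediately can never drop io->pending to zero
while the submitting loop still needs the io, which is exactly why the patch
also warns against referencing the clone after generic_make_request().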