#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
+#include <asm/unaligned.h>
#include "dm.h"
struct crypt_io {
struct dm_target *target;
struct bio *base_bio;
- struct bio *first_clone;
struct work_struct work;
atomic_t pending;
int error;
*/
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
- struct crypto_cipher *iv_gen_private;
+ union {
+ struct crypto_cipher *essiv_tfm;
+ int benbi_shift;
+ } iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
+
+static void clone_init(struct crypt_io *, struct bio *);
/*
* Different IV generation algorithms:
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
*
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ * (needed for LRW-32-AES and possibly other narrow block modes;
+ * see the example below)
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
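+
+/*
+ * For illustration, assuming a cipher with 16-byte blocks such as AES:
+ * a 512-byte sector holds 512 / 16 = 32 narrow blocks, so the benbi
+ * count for sector N is N * 32 + 1 (e.g. sector 3 -> 97), stored
+ * big-endian in the last 8 bytes of the IV.
+ */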
}
kfree(salt);
- cc->iv_gen_private = essiv_tfm;
+ cc->iv_gen_private.essiv_tfm = essiv_tfm;
return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
- crypto_free_cipher(cc->iv_gen_private);
- cc->iv_gen_private = NULL;
+ crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+ cc->iv_gen_private.essiv_tfm = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
- crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+ return 0;
+}
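+
+/*
+ * In other words, assuming crypt_iv_essiv_ctr() keyed essiv_tfm with a
+ * hash of the bulk key (the salt), the IV above is
+ *
+ *	IV = E_salt(cpu_to_le64(sector))
+ *
+ * e.g. with "essiv:sha256" the salt is the SHA-256 digest of the key.
+ */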
+
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+ int log = ilog2(bs);
+
+ /* Calculate how far the sector count must be shifted to get the
+ * cipher block count; crypt_iv_benbi_gen() uses this shift (see the
+ * worked example after this function). */
+
+ if (1 << log != bs) {
+ ti->error = "cipher blocksize is not a power of 2";
+ return -EINVAL;
+ }
+
+ if (log > 9) {
+ ti->error = "cipher blocksize is > 512";
+ return -EINVAL;
+ }
+
+ cc->iv_gen_private.benbi_shift = 9 - log;
+
+ return 0;
+}
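+
+/*
+ * Worked example of the shift, assuming AES's 16-byte block size:
+ *
+ *	log   = ilog2(16) = 4
+ *	shift = 9 - 4     = 5	(1 << 5 = 32 narrow blocks per 512-byte sector)
+ */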
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+ __be64 val;
+
+ memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is filled in below */
+
+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+ put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
return 0;
}
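+
+/*
+ * Resulting layout, assuming a 16-byte IV and benbi_shift == 5: sector 1
+ * gives the count (1 << 5) + 1 = 33 = 0x21, so the IV bytes are
+ *
+ *	00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 21
+ *
+ * i.e. the big-endian count fills the last 8 bytes, the rest is zero.
+ */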
.generator = crypt_iv_essiv_gen
};
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+ .ctr = crypt_iv_benbi_ctr,
+ .dtr = crypt_iv_benbi_dtr,
+ .generator = crypt_iv_benbi_gen
+};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
struct scatterlist *in, unsigned int length,
int write, sector_t sector)
{
- u8 iv[cc->iv_size];
+ u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
struct blkcipher_desc desc = {
.tfm = cc->tfm,
.info = iv,
* This should never violate the device limitations
* May return a smaller bio when running out of pages
*/
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
- struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
+ unsigned int *bio_vec_idx)
{
+ struct crypt_config *cc = io->target->private;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
unsigned int i;
- if (base_bio) {
- clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
- __bio_clone(clone, base_bio);
- } else
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
if (!clone)
return NULL;
- clone->bi_destructor = dm_crypt_bio_destructor;
+ clone_init(io, clone);
/* if the last bio was not complete, continue where that one ended */
clone->bi_idx = *bio_vec_idx;
if (!atomic_dec_and_test(&io->pending))
return;
- if (io->first_clone)
- bio_put(io->first_clone);
-
bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
mempool_free(io, cc->io_pool);
* interrupt context.
*/
static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_queue_io(struct crypt_io *io)
{
- INIT_WORK(&io->work, kcryptd_do_work, io);
+ INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
}
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
clone->bi_rw = io->base_bio->bi_rw;
+ clone->bi_destructor = dm_crypt_bio_destructor;
}
static void process_read(struct crypt_io *io)
}
clone_init(io, clone);
- clone->bi_destructor = dm_crypt_bio_destructor;
clone->bi_idx = 0;
clone->bi_vcnt = bio_segments(base_bio);
clone->bi_size = base_bio->bi_size;
* so repeat the whole process until all the data can be handled.
*/
while (remaining) {
- clone = crypt_alloc_buffer(cc, base_bio->bi_size,
- io->first_clone, &bvec_idx);
+ clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
if (unlikely(!clone)) {
dec_pending(io, -ENOMEM);
return;
return;
}
- clone_init(io, clone);
clone->bi_sector = cc->start + sector;
-
- if (!io->first_clone) {
- /*
- * hold a reference to the first clone, because it
- * holds the bio_vec array and that can't be freed
- * before all other clones are released
- */
- bio_get(clone);
- io->first_clone = clone;
- }
-
remaining -= clone->bi_size;
sector += bio_sectors(clone);
- /* prevent bio_put of first_clone */
+ /* Grab another reference to the io struct
+ * before we kick off the request */
if (remaining)
atomic_inc(&io->pending);
generic_make_request(clone);
+ /* Do not reference clone after this - it
+ * may be gone already. */
+
/* out of memory -> run queues */
if (remaining)
- congestion_wait(bio_data_dir(clone), HZ/100);
+ congestion_wait(WRITE, HZ/100);
}
}
dec_pending(io, crypt_convert(cc, &ctx));
}
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = data;
+ struct crypt_io *io = container_of(work, struct crypt_io, work);
if (io->post_process)
process_read_endio(io);
cc->tfm = tfm;
/*
- * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+ * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
* See comments at iv code
*/
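+ /*
+ * For example, assuming the usual cipher-chainmode-ivmode[:ivopts]
+ * table syntax: "aes-cbc-essiv:sha256" selects essiv with sha256,
+ * while "aes-lrw-benbi" selects the new benbi generator for LRW.
+ */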
cc->iv_gen_ops = &crypt_iv_plain_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
+ else if (strcmp(ivmode, "benbi") == 0)
+ cc->iv_gen_ops = &crypt_iv_benbi_ops;
else {
ti->error = "Invalid IV mode";
goto bad2;
goto bad4;
}
- cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
+ cc->bs = bioset_create(MIN_IOS, MIN_IOS);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad_bs;
struct crypt_config *cc = ti->private;
struct crypt_io *io;
+ if (bio_barrier(bio))
+ return -EOPNOTSUPP;
+
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->target = ti;
io->base_bio = bio;
- io->first_clone = NULL;
io->error = io->post_process = 0;
atomic_set(&io->pending, 0);
kcryptd_queue_io(io);
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,