[linux-2.4.git] / crypto / cipher.c
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

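/*
 * cryptfn_t is the raw block transform supplied by the underlying
 * algorithm: (ctx, dst, src), one block at a time.  procfn_t wraps it
 * with per-mode processing: (tfm, dst, src, crfn, enc, info, in_place),
 * where info carries mode state (the IV for CBC) and in_place tells the
 * mode that dst and src alias the same buffer.
 */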
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *,
                        u8 *, cryptfn_t, int enc, void *, int);

static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}


/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes, cryptfn_t crfn,
                 procfn_t prfn, int enc, void *info)
{
        struct scatter_walk walk_in, walk_out;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        u8 tmp_src[bsize];
        u8 tmp_dst[bsize];

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                u8 *src_p, *dst_p;
                int in_place;

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);

                /* Work directly in the mapped pages when a whole block
                 * fits there, otherwise bounce through the stack blocks. */
                src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
                dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
                in_place = scatterwalk_samebuf(&walk_in, &walk_out,
                                               src_p, dst_p);

                nbytes -= bsize;

                /* Gathers into tmp_src only if needed; always advances. */
                scatterwalk_copychunks(src_p, &walk_in, bsize, 0);

                prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);

                scatterwalk_done(&walk_in, 0, nbytes);

                /* Scatter the result back and advance the output walk. */
                scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
                scatterwalk_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        return 0;

                crypto_yield(tfm);
        }
}

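/*
 * CBC mode: on encryption each plaintext block is XORed with the
 * previous ciphertext block (the IV standing in for block zero) before
 * the cipher runs, i.e. C_i = E_K(P_i ^ C_{i-1}); decryption computes
 * P_i = D_K(C_i) ^ C_{i-1}.  The chaining value lives in the buffer
 * passed through info.
 */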
static void cbc_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                        cryptfn_t fn, int enc, void *info, int in_place)
{
        u8 *iv = info;

        /* Null encryption */
        if (!iv)
                return;

        if (enc) {
                tfm->crt_u.cipher.cit_xor_block(iv, src);
                fn(crypto_tfm_ctx(tfm), dst, iv);
                memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
        } else {
                /* For in-place decryption, decrypt into a stack block so
                 * that src still holds the ciphertext needed as the next
                 * chaining value. */
                u8 stack[in_place ? crypto_tfm_alg_blocksize(tfm) : 0];
                u8 *buf = in_place ? stack : dst;

                fn(crypto_tfm_ctx(tfm), buf, src);
                tfm->crt_u.cipher.cit_xor_block(buf, iv);
                memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
                if (buf != dst)
                        memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
        }
}

static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                        cryptfn_t fn, int enc, void *info, int in_place)
{
        /* ECB has no chaining state; enc and info are ignored. */
        fn(crypto_tfm_ctx(tfm), dst, src);
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        } else
                return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
                                       &tfm->crt_flags);
}

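/*
 * For reference, a minimal sketch of the algorithm side that setkey()
 * and crypt() drive.  Illustrative only: every name, size, and the ctx
 * layout below are hypothetical, not part of this file.
 */
#if 0
struct example_ctx {
        u32 rk[32];                     /* expanded key, made-up size */
};

static int example_setkey(void *ctx, const u8 *key,
                          unsigned int keylen, u32 *flags)
{
        /* expand key into ctx; on a bad key, set a CRYPTO_TFM_RES_*
         * bit in *flags and return -EINVAL */
        return 0;
}

static void example_encrypt(void *ctx, u8 *dst, const u8 *src)
{
        /* transform exactly one cra_blocksize block */
}

static void example_decrypt(void *ctx, u8 *dst, const u8 *src)
{
}

static struct crypto_alg example_alg = {
        .cra_name       = "example",
        .cra_flags      = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize  = 16,
        .cra_ctxsize    = sizeof(struct example_ctx),
        .cra_module     = THIS_MODULE,
        .cra_list       = LIST_HEAD_INIT(example_alg.cra_list),
        .cra_u          = { .cipher = {
                .cia_min_keysize = 16,
                .cia_max_keysize = 32,
                .cia_setkey      = example_setkey,
                .cia_encrypt     = example_encrypt,
                .cia_decrypt     = example_decrypt } }
};

/* registered with crypto_register_alg(&example_alg) from module init */
#endif
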
static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     ecb_process, 1, NULL);
}

/* The ecb_*_iv() variants exist only because some cryptoloop patches
 * call the _iv entry points even in ECB mode; see the comment in
 * crypto_init_cipher_ops() below.  They do the work but still return
 * -ENOSYS. */
static int ecb_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        ecb_encrypt(tfm, dst, src, nbytes);
        return -ENOSYS;
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     ecb_process, 1, NULL);
}

static int ecb_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        ecb_decrypt(tfm, dst, src, nbytes);
        return -ENOSYS;
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process, 1, tfm->crt_cipher.cit_iv);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process, 1, iv);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process, 0, tfm->crt_cipher.cit_iv);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process, 0, iv);
}

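/*
 * Adding a mode to this file amounts to a procfn plus thin wrappers
 * around crypt(), in the same shape as the ECB/CBC pairs above.  A
 * hypothetical sketch (nothing below is implemented here):
 */
#if 0
static void xyz_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                        cryptfn_t fn, int enc, void *info, int in_place)
{
        /* per-block work: combine src with the mode state carried in
         * info, run fn, then update the state for the next block */
        fn(crypto_tfm_ctx(tfm), dst, src);
}

static int xyz_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     xyz_process, 1, NULL);
}
#endif
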
/*
 * nocrypt*() zeroize the destination buffer to make sure we don't leak
 * uninitialized memory contents if the caller ignores the return value.
 * This is bad since the data in the source buffer is unused and may be
 * lost, but an infoleak would be even worse.  The performance cost of
 * memset() is irrelevant since a well-behaved caller would not bump into
 * the error repeatedly.
 */
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        /* XXX: this clears the scatterlist descriptors themselves, not
         * the page data they describe. */
        memset(dst, 0, nbytes);
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        memset(dst, 0, nbytes);
        return -ENOSYS;
}

int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
        if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
                tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;

        return 0;
}

int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                /* These should have been nocrypt_iv, but
                 * patch-cryptoloop-jari-2.4.22.0 (and its other revisions)
                 * directly calls the *_iv() functions even in ECB mode and
                 * ignores their return value. */
                ops->cit_encrypt_iv = ecb_encrypt_iv;
                ops->cit_decrypt_iv = ecb_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
                ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
                if (ops->cit_iv == NULL)
                        ret = -ENOMEM;
        }

out:
        return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
        if (tfm->crt_cipher.cit_iv)
                kfree(tfm->crt_cipher.cit_iv);
}
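
/*
 * Example of driving this layer from other kernel code.  A sketch only:
 * the algorithm name, key, and buffer are made up, and the scatterlist
 * setup assumes the page-based layout that the scatterwalk code above
 * operates on.
 */
#if 0
static int example_cbc_usage(void)
{
        struct crypto_tfm *tfm;
        struct scatterlist sg;
        u8 key[16] = { 0 };             /* hypothetical key */
        u8 iv[16] = { 0 };              /* hypothetical IV */
        u8 buf[32] = { 0 };             /* two blocks, encrypted in place */
        int ret;

        tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
        if (tfm == NULL)
                return -ENOMEM;

        ret = crypto_cipher_setkey(tfm, key, sizeof(key));
        if (ret)
                goto out;

        sg.page   = virt_to_page(buf);
        sg.offset = (unsigned long)buf & ~PAGE_MASK;
        sg.length = sizeof(buf);

        crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));
        ret = crypto_cipher_encrypt(tfm, &sg, &sg, sizeof(buf));
out:
        crypto_free_tfm(tfm);
        return ret;
}
#endif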