pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[CRYPTO] blkcipher: Remove alignment restriction on block size
author Herbert Xu <herbert@gondor.apana.org.au>
Sat, 29 Sep 2007 13:24:23 +0000 (21:24 +0800)
committer David S. Miller <davem@sunset.davemloft.net>
Wed, 10 Oct 2007 23:55:46 +0000 (16:55 -0700)
Previously we assumed for convenience that the block size is a multiple of
the algorithm's required alignment.  With the pending addition of CTR this
will no longer be the case as the block size will be 1 due to it being a
stream cipher.  However, the alignment requirement will be that of the
underlying implementation which will most likely be greater than 1.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
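
The practical consequence is that buffer sizing in the walk code can no longer use the block size directly; it first has to be rounded up to the implementation's alignment. Below is a minimal user-space sketch of that arithmetic (not part of the patch): ALIGN() is redefined locally to mirror the kernel macro, and the alignmask value of 3 is an assumed example for an underlying implementation that needs 4-byte alignment.

/* Standalone sketch, not kernel code. */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bsize = 1;		/* CTR reports a 1-byte block size */
	unsigned int alignmask = 3;	/* assumed alignment requirement */

	/* Old assumption: bsize itself is a multiple of alignmask + 1.
	 * After this patch the walk code rounds it up instead. */
	unsigned int aligned_bsize = ALIGN(bsize, alignmask + 1);

	printf("bsize=%u aligned_bsize=%u\n", bsize, aligned_bsize);
	return 0;	/* prints: bsize=1 aligned_bsize=4 */
}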
crypto/algapi.c
crypto/blkcipher.c

diff --git a/crypto/algapi.c b/crypto/algapi.c
index d891f56f0e8cdb9d6d843c8209518282d70e8b7c..58cc19164801e0d634e5cc92fee4466b673d42ff 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -63,9 +63,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
        if (alg->cra_alignmask & (alg->cra_alignmask + 1))
                return -EINVAL;
 
-       if (alg->cra_alignmask & alg->cra_blocksize)
-               return -EINVAL;
-
        if (alg->cra_blocksize > PAGE_SIZE / 8)
                return -EINVAL;
 
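The test removed above is the one that would have rejected CTR: cra_alignmask & cra_blocksize is nonzero exactly when the block size is not a multiple of the required alignment, which is unavoidable for a block size of 1 combined with a nonzero alignmask. A small illustration with assumed example values follows (a real CTR instance inherits its alignmask from the underlying cipher implementation).

#include <stdio.h>

int main(void)
{
	unsigned int cra_blocksize = 1;	/* stream cipher block size */
	unsigned int cra_alignmask = 3;	/* assumed underlying alignment */

	/* The check deleted from crypto_check_alg() above: any block size
	 * that is not a multiple of (alignmask + 1) used to be rejected. */
	if (cra_alignmask & cra_blocksize)
		printf("old check would have returned -EINVAL\n");
	else
		printf("algorithm accepted\n");

	return 0;
}
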
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 9c49770837c293bff73e6f0961676d082d40c684..a3c87da23f1e1f7ad84fcf30e560a6d936267bdb 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -149,6 +149,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      unsigned int alignmask)
 {
        unsigned int n;
+       unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
 
        if (walk->buffer)
                goto ok;
@@ -167,8 +168,8 @@ ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
-       walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
-                                                bsize);
+       walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
+                                                aligned_bsize, bsize);
 
        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
 
@@ -278,7 +279,9 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 {
        unsigned bs = crypto_blkcipher_blocksize(tfm);
        unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
-       unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
+       unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+       unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+                           (alignmask + 1);
        u8 *iv;
 
        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
@@ -287,8 +290,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
                return -ENOMEM;
 
        iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
-       iv = blkcipher_get_spot(iv, bs) + bs;
-       iv = blkcipher_get_spot(iv, bs) + bs;
+       iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+       iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, ivsize);
 
        walk->iv = memcpy(iv, walk->iv, ivsize);
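
In crypto/blkcipher.c the same rounding is applied to the temporary buffers on the slow path: in blkcipher_next_slow() the source block now sits aligned_bsize (rather than bsize) past the destination block so it keeps the required alignment, and in blkcipher_copy_iv() the scratch area is sized from the rounded-up block size. The sketch below reproduces only the new size computation, with the same assumed example values as above; blkcipher_get_spot(), which keeps a block from spanning a page boundary, is left out for brevity.

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int bs = 1;		/* CTR block size */
	unsigned int ivsize = 16;	/* assumed IV size, e.g. a 16-byte counter block */
	unsigned int alignmask = 3;	/* assumed alignment requirement */
	unsigned int aligned_bs = ALIGN(bs, alignmask + 1);

	/* Mirrors the new size computation in blkcipher_copy_iv() above:
	 * two rounded-up block slots plus the IV. */
	unsigned int size = aligned_bs * 2 + ivsize +
			    MAX(aligned_bs, ivsize) - (alignmask + 1);

	printf("aligned_bs=%u size=%u\n", aligned_bs, size);
	return 0;	/* prints: aligned_bs=4 size=36 */
}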