author	Herbert Xu <herbert@gondor.apana.org.au>	2005-07-06 13:52:09 -0700
committer	David S. Miller <davem@davemloft.net>	2005-07-06 13:52:09 -0700
commit	95477377995aefa2ec1654a9a3777bd57ea99146 (patch)
tree	7aa4d6173de13c81c2fa0e4d2f9e0de22e141b6a /crypto
parent	40725181b74be6b0e3bdc8c05bd1e0b9873ec5cc (diff)
[CRYPTO] Add alignmask for low-level cipher implementations
The VIA Padlock device requires the input and output buffers to be aligned on
16-byte boundaries. This patch adds the alignmask attribute for low-level
cipher implementations to indicate their alignment requirements.

The mid-level crypt() function will copy the input/output buffers if they are
not aligned correctly before they are passed to the low-level implementation.

Strictly speaking, some of the software implementations require the buffers to
be aligned on 4-byte boundaries as they do 32-bit loads. However, it is not
clear whether it is better to copy the buffers or pay the penalty for
unaligned loads/stores.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
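For illustration, the following is a minimal userspace sketch of the two constraints
crypto_register_alg() now enforces and of the alignment arithmetic crypt_slow() uses
for its bounce buffers. The ALIGN() macro, the alignmask_valid() helper, and the
example values are assumptions local to this sketch, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's ALIGN() macro (assumption for this sketch). */
#define ALIGN(x, a) (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

/* crypto_register_alg() only accepts masks of the form 2^n - 1: adding 1 to
 * such a mask leaves a single set bit, so ANDing it with the mask gives zero. */
static int alignmask_valid(unsigned long alignmask)
{
	return (alignmask & (alignmask + 1)) == 0;
}

int main(void)
{
	unsigned long alignmask = 15;	/* 16-byte alignment, as Padlock requires */
	unsigned int bsize = 16;	/* e.g. the AES block size */
	uint8_t buffer[16 * 2 + 15];	/* bsize * 2 + alignmask, as in crypt_slow() */
	uint8_t *src, *dst;

	assert(alignmask_valid(15));	/* accepted: 2^4 - 1 */
	assert(!alignmask_valid(12));	/* rejected: not of the form 2^n - 1 */

	/* Same trick as crypt_slow(): round the start of the scratch buffer up
	 * to the next (alignmask + 1)-byte boundary, then carve the source and
	 * destination blocks out of the aligned region. */
	src = (uint8_t *)ALIGN((uintptr_t)buffer, alignmask + 1);
	dst = src + bsize;

	printf("src offset into buffer: %zu, dst offset: %zu\n",
	       (size_t)(src - buffer), (size_t)(dst - buffer));
	return 0;
}

Note that well-aligned callers are unaffected: crypt() only allocates the bounce page
and routes data through tmp when scatterwalk_aligned() reports a misaligned input or
output walk.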
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/api.c	6
-rw-r--r--	crypto/cipher.c	43
-rw-r--r--	crypto/scatterwalk.h	6
3 files changed, 48 insertions, 7 deletions
diff --git a/crypto/api.c b/crypto/api.c
index 394169a8577..f55856b2199 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -168,6 +168,12 @@ int crypto_register_alg(struct crypto_alg *alg)
 {
 	int ret = 0;
 	struct crypto_alg *q;
+
+	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
+		return -EINVAL;
+
+	if (alg->cra_alignmask > PAGE_SIZE)
+		return -EINVAL;
 
 	down_write(&crypto_alg_sem);
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 54c4a560070..85eb12f8e56 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -41,8 +41,10 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
 			       struct scatter_walk *in,
 			       struct scatter_walk *out, unsigned int bsize)
 {
-	u8 src[bsize];
-	u8 dst[bsize];
+	unsigned int alignmask = desc->tfm->__crt_alg->cra_alignmask;
+	u8 buffer[bsize * 2 + alignmask];
+	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+	u8 *dst = src + bsize;
 	unsigned int n;
 
 	n = scatterwalk_copychunks(src, in, bsize, 0);
@@ -59,15 +61,24 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
 static inline unsigned int crypt_fast(const struct cipher_desc *desc,
 				       struct scatter_walk *in,
 				       struct scatter_walk *out,
-				       unsigned int nbytes)
+				       unsigned int nbytes, u8 *tmp)
 {
 	u8 *src, *dst;
 
 	src = in->data;
 	dst = scatterwalk_samebuf(in, out) ? src : out->data;
 
+	if (tmp) {
+		memcpy(tmp, in->data, nbytes);
+		src = tmp;
+		dst = tmp;
+	}
+
 	nbytes = desc->prfn(desc, dst, src, nbytes);
 
+	if (tmp)
+		memcpy(out->data, tmp, nbytes);
+
 	scatterwalk_advance(in, nbytes);
 	scatterwalk_advance(out, nbytes);
@@ -87,6 +98,8 @@ static int crypt(const struct cipher_desc *desc,
 	struct scatter_walk walk_in, walk_out;
 	struct crypto_tfm *tfm = desc->tfm;
 	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+	unsigned int alignmask = tfm->__crt_alg->cra_alignmask;
+	unsigned long buffer = 0;
 
 	if (!nbytes)
 		return 0;
@@ -100,16 +113,27 @@ static int crypt(const struct cipher_desc *desc,
 	scatterwalk_start(&walk_out, dst);
 
 	for(;;) {
-		unsigned int n;
+		unsigned int n = nbytes;
+		u8 *tmp = NULL;
+
+		if (!scatterwalk_aligned(&walk_in, alignmask) ||
+		    !scatterwalk_aligned(&walk_out, alignmask)) {
+			if (!buffer) {
+				buffer = __get_free_page(GFP_ATOMIC);
+				if (!buffer)
+					n = 0;
+			}
+			tmp = (u8 *)buffer;
+		}
 
 		scatterwalk_map(&walk_in, 0);
 		scatterwalk_map(&walk_out, 1);
 
-		n = scatterwalk_clamp(&walk_in, nbytes);
+		n = scatterwalk_clamp(&walk_in, n);
 		n = scatterwalk_clamp(&walk_out, n);
 
 		if (likely(n >= bsize))
-			n = crypt_fast(desc, &walk_in, &walk_out, n);
+			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
 		else
 			n = crypt_slow(desc, &walk_in, &walk_out, bsize);
@@ -119,10 +143,15 @@ static int crypt(const struct cipher_desc *desc,
 		scatterwalk_done(&walk_out, 1, nbytes);
 
 		if (!nbytes)
-			return 0;
+			break;
 
 		crypto_yield(tfm);
 	}
+
+	if (buffer)
+		free_page(buffer);
+
+	return 0;
 }
 
 static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h
index 5495bb97081..e79925c474a 100644
--- a/crypto/scatterwalk.h
+++ b/crypto/scatterwalk.h
@@ -55,6 +55,12 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
 	walk->len_this_segment -= nbytes;
 }
 
+static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
+					       unsigned int alignmask)
+{
+	return !(walk->offset & alignmask);
+}
+
 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out);
 void scatterwalk_map(struct scatter_walk *walk, int out);