X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fpadlock-aes.c;h=5158a9db4bc58faeabd16b44a7ed2221cbd141ed;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=ed708b4427b0836a2dc836023a683665a2415872;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index ed708b442..5158a9db4 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -49,6 +49,7 @@
 #include <linux/errno.h>
 #include <linux/crypto.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <asm/byteorder.h>
 
 #include "padlock.h"
@@ -59,8 +60,12 @@
 #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
 
 struct aes_ctx {
-	uint32_t e_data[AES_EXTENDED_KEY_SIZE+4];
-	uint32_t d_data[AES_EXTENDED_KEY_SIZE+4];
+	uint32_t e_data[AES_EXTENDED_KEY_SIZE];
+	uint32_t d_data[AES_EXTENDED_KEY_SIZE];
+	struct {
+		struct cword encrypt;
+		struct cword decrypt;
+	} cword;
 	uint32_t *E;
 	uint32_t *D;
 	int key_length;
@@ -94,9 +99,6 @@ byte(const uint32_t x, const unsigned n)
 	return x >> (n << 3);
 }
 
-#define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x))
-#define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from))
-
 #define E_KEY ctx->E
 #define D_KEY ctx->D
 
@@ -280,10 +282,20 @@ aes_hw_extkey_available(uint8_t key_len)
 	return 0;
 }
 
+static inline struct aes_ctx *aes_ctx(void *ctx)
+{
+	unsigned long align = PADLOCK_ALIGNMENT;
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+	return (struct aes_ctx *)ALIGN((unsigned long)ctx, align);
+}
+
 static int
 aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
 {
-	struct aes_ctx *ctx = ctx_arg;
+	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	const __le32 *key = (const __le32 *)in_key;
 	uint32_t i, t, u, v, w;
 	uint32_t P[AES_EXTENDED_KEY_SIZE];
 	uint32_t rounds;
@@ -295,25 +307,36 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
 
 	ctx->key_length = key_len;
 
+	/*
+	 * If the hardware is capable of generating the extended key
+	 * itself we must supply the plain key for both encryption
+	 * and decryption.
+	 */
 	ctx->E = ctx->e_data;
-	ctx->D = ctx->d_data;
+	ctx->D = ctx->e_data;
 
-	/* Ensure 16-Bytes alignmentation of keys for VIA PadLock. */
-	if ((int)(ctx->e_data) & 0x0F)
-		ctx->E += 4 - (((int)(ctx->e_data) & 0x0F) / sizeof (ctx->e_data[0]));
+	E_KEY[0] = le32_to_cpu(key[0]);
+	E_KEY[1] = le32_to_cpu(key[1]);
+	E_KEY[2] = le32_to_cpu(key[2]);
+	E_KEY[3] = le32_to_cpu(key[3]);
 
-	if ((int)(ctx->d_data) & 0x0F)
-		ctx->D += 4 - (((int)(ctx->d_data) & 0x0F) / sizeof (ctx->d_data[0]));
+	/* Prepare control words. */
+	memset(&ctx->cword, 0, sizeof(ctx->cword));
 
-	E_KEY[0] = uint32_t_in (in_key);
-	E_KEY[1] = uint32_t_in (in_key + 4);
-	E_KEY[2] = uint32_t_in (in_key + 8);
-	E_KEY[3] = uint32_t_in (in_key + 12);
+	ctx->cword.decrypt.encdec = 1;
+	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
+	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
+	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
+	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
 
 	/* Don't generate extended keys if the hardware can do it. */
 	if (aes_hw_extkey_available(key_len))
 		return 0;
 
+	ctx->D = ctx->d_data;
+	ctx->cword.encrypt.keygen = 1;
+	ctx->cword.decrypt.keygen = 1;
+
 	switch (key_len) {
 	case 16:
 		t = E_KEY[3];
@@ -322,17 +345,17 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
 		break;
 
 	case 24:
-		E_KEY[4] = uint32_t_in (in_key + 16);
-		t = E_KEY[5] = uint32_t_in (in_key + 20);
+		E_KEY[4] = le32_to_cpu(key[4]);
+		t = E_KEY[5] = le32_to_cpu(key[5]);
 		for (i = 0; i < 8; ++i)
 			loop6 (i);
 		break;
 
 	case 32:
-		E_KEY[4] = uint32_t_in (in_key + 16);
-		E_KEY[5] = uint32_t_in (in_key + 20);
-		E_KEY[6] = uint32_t_in (in_key + 24);
-		t = E_KEY[7] = uint32_t_in (in_key + 28);
+		E_KEY[4] = le32_to_cpu(key[4]);
+		E_KEY[5] = le32_to_cpu(key[5]);
+		E_KEY[6] = le32_to_cpu(key[6]);
+		t = E_KEY[7] = le32_to_cpu(key[7]);
 		for (i = 0; i < 7; ++i)
 			loop8 (i);
 		break;
@@ -369,10 +392,9 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
 
 /* ====== Encryption/decryption routines ====== */
 
-/* This is the real call to PadLock. */
-static inline void
-padlock_xcrypt_ecb(uint8_t *input, uint8_t *output, uint8_t *key,
-		   void *control_word, uint32_t count)
+/* These are the real calls to PadLock. */
+static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+				      void *control_word, u32 count)
 {
 	asm volatile ("pushfl; popfl");	/* enforce key reload. */
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
@@ -380,67 +402,80 @@ padlock_xcrypt_ecb(uint8_t *input, uint8_t *output, uint8_t *key,
 		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void
-aes_padlock(void *ctx_arg, uint8_t *out_arg, const uint8_t *in_arg, int encdec)
+static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				     u8 *iv, void *control_word, u32 count)
 {
-	/* Don't blindly modify this structure - the items must
-	   fit on 16-Bytes boundaries! */
-	struct padlock_xcrypt_data {
-		uint8_t buf[AES_BLOCK_SIZE];
-		union cword cword;
-	};
-
-	struct aes_ctx *ctx = ctx_arg;
-	char bigbuf[sizeof(struct padlock_xcrypt_data) + 16];
-	struct padlock_xcrypt_data *data;
-	void *key;
-
-	/* Place 'data' at the first 16-Bytes aligned address in 'bigbuf'. */
-	if (((long)bigbuf) & 0x0F)
-		data = (void*)(bigbuf + 16 - ((long)bigbuf & 0x0F));
-	else
-		data = (void*)bigbuf;
-
-	/* Prepare Control word. */
-	memset (data, 0, sizeof(struct padlock_xcrypt_data));
-	data->cword.b.encdec = !encdec;	/* in the rest of cryptoapi ENC=1/DEC=0 */
-	data->cword.b.rounds = 10 + (ctx->key_length - 16) / 4;
-	data->cword.b.ksize = (ctx->key_length - 16) / 8;
-
-	/* Is the hardware capable to generate the extended key? */
-	if (!aes_hw_extkey_available(ctx->key_length))
-		data->cword.b.keygen = 1;
-
-	/* ctx->E starts with a plain key - if the hardware is capable
-	   to generate the extended key itself we must supply
-	   the plain key for both Encryption and Decryption. */
-	if (encdec == CRYPTO_DIR_ENCRYPT || data->cword.b.keygen == 0)
-		key = ctx->E;
-	else
-		key = ctx->D;
-
-	memcpy(data->buf, in_arg, AES_BLOCK_SIZE);
-	padlock_xcrypt_ecb(data->buf, data->buf, key, &data->cword, 1);
-	memcpy(out_arg, data->buf, AES_BLOCK_SIZE);
+	/* Enforce key reload. */
+	asm volatile ("pushfl; popfl");
+	/* rep xcryptcbc */
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
 }
 
 static void
 aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
 {
-	aes_padlock(ctx_arg, out, in, CRYPTO_DIR_ENCRYPT);
+	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
 }
 
 static void
 aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
 {
-	aes_padlock(ctx_arg, out, in, CRYPTO_DIR_DECRYPT);
+	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
+}
+
+static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
+				    const u8 *in, unsigned int nbytes)
+{
+	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
+			   nbytes / AES_BLOCK_SIZE);
+	return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
+				    const u8 *in, unsigned int nbytes)
+{
+	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
+			   nbytes / AES_BLOCK_SIZE);
+	return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
+				    const u8 *in, unsigned int nbytes)
+{
+	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	u8 *iv;
+
+	iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
+				&ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
+	memcpy(desc->info, iv, AES_BLOCK_SIZE);
+
+	return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
+				    const u8 *in, unsigned int nbytes)
+{
+	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
+			   nbytes / AES_BLOCK_SIZE);
+	return nbytes & ~(AES_BLOCK_SIZE - 1);
 }
 
 static struct crypto_alg aes_alg = {
 	.cra_name		=	"aes",
+	.cra_driver_name	=	"aes-padlock",
+	.cra_priority		=	300,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
 	.cra_u			=	{
@@ -449,7 +484,11 @@ static struct crypto_alg aes_alg = {
 			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
 			.cia_setkey	   	= 	aes_set_key,
 			.cia_encrypt	 	=	aes_encrypt,
-			.cia_decrypt	  	=	aes_decrypt
+			.cia_decrypt	  	=	aes_decrypt,
+			.cia_encrypt_ecb 	=	aes_encrypt_ecb,
+			.cia_decrypt_ecb  	=	aes_decrypt_ecb,
+			.cia_encrypt_cbc 	=	aes_encrypt_cbc,
+			.cia_decrypt_cbc  	=	aes_decrypt_cbc,
 		}
 	}
 };
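
Note on the context alignment used by this patch: the xcrypt instructions expect the key schedule and control word on 16-byte boundaries, so the patch sets cra_alignmask and rounds the tfm context pointer up with ALIGN() in aes_ctx() instead of fixing up the E/D pointers by hand on every call. The user-space sketch below reproduces only that round-up arithmetic; PADLOCK_ALIGNMENT is assumed to be 16 here, and aes_ctx_stub is an illustrative stand-in, not the driver's real struct aes_ctx.

	#include <stdint.h>
	#include <stdio.h>

	#define PADLOCK_ALIGNMENT 16	/* assumed value; the real one lives in padlock.h */
	#define ALIGN(x, a) (((x) + (uintptr_t)(a) - 1) & ~((uintptr_t)(a) - 1))

	/* Illustrative stand-in for the driver's context structure. */
	struct aes_ctx_stub {
		uint32_t e_data[60];
		uint32_t d_data[60];
	};

	static struct aes_ctx_stub *aes_ctx_stub(void *raw)
	{
		/* Round the raw context pointer up to the next 16-byte
		 * boundary, as aes_ctx() does with ALIGN() in the patch. */
		return (struct aes_ctx_stub *)ALIGN((uintptr_t)raw, PADLOCK_ALIGNMENT);
	}

	int main(void)
	{
		unsigned char raw[sizeof(struct aes_ctx_stub) + PADLOCK_ALIGNMENT];
		struct aes_ctx_stub *ctx = aes_ctx_stub(raw);

		printf("raw=%p aligned=%p\n", (void *)raw, (void *)ctx);
		return (uintptr_t)ctx % PADLOCK_ALIGNMENT ? 1 : 0;
	}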
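The control-word fields written in aes_set_key() carry the parameters the CPU needs: rounds = 10 + (key_len - 16) / 4 gives 10, 12 or 14 rounds for 128-, 192- and 256-bit keys, ksize = (key_len - 16) / 8 encodes the key size as 0, 1 or 2, encdec selects decryption, and keygen is set only when the key schedule has to be expanded in software. A quick stand-alone check of that arithmetic (the actual bit-field layout lives in padlock.h and is not reproduced here):

	#include <stdio.h>

	int main(void)
	{
		unsigned int key_len;

		for (key_len = 16; key_len <= 32; key_len += 8) {
			unsigned int rounds = 10 + (key_len - 16) / 4;	/* 10, 12, 14 */
			unsigned int ksize  = (key_len - 16) / 8;	/* 0, 1, 2 */

			printf("%u-bit key: rounds=%u ksize=%u\n",
			       key_len * 8, rounds, ksize);
		}
		return 0;
	}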
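padlock_xcrypt_cbc() returns the IV pointer from the "+a" (EAX) operand, which the hardware advances as it chains blocks, and aes_encrypt_cbc() copies that value back into desc->info so the next chunk continues from the last ciphertext block. The plain-C sketch below illustrates that chaining rule with a placeholder block transform standing in for AES; the toy_* names are purely illustrative.

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define BLOCK_SIZE 16

	/* Placeholder for the real AES block encryption. */
	static void toy_block_encrypt(uint8_t *block)
	{
		for (int i = 0; i < BLOCK_SIZE; i++)
			block[i] = (uint8_t)(block[i] * 5 + 1);
	}

	/* Returns the chaining value for the next call, mirroring the
	 * `return iv` in padlock_xcrypt_cbc(). */
	static uint8_t *toy_cbc_encrypt(const uint8_t *in, uint8_t *out,
					uint8_t *iv, unsigned int count)
	{
		while (count--) {
			for (int i = 0; i < BLOCK_SIZE; i++)
				out[i] = in[i] ^ iv[i];	/* XOR with previous block */
			toy_block_encrypt(out);
			iv = out;			/* chain: IV <- ciphertext */
			in += BLOCK_SIZE;
			out += BLOCK_SIZE;
		}
		return iv;	/* points at the last ciphertext block */
	}

	int main(void)
	{
		uint8_t iv[BLOCK_SIZE] = { 0 }, in[4 * BLOCK_SIZE], out[4 * BLOCK_SIZE];
		uint8_t *next_iv;

		memset(in, 0xab, sizeof(in));
		next_iv = toy_cbc_encrypt(in, out, iv, 4);

		/* As in aes_encrypt_cbc(): carry the chaining value forward. */
		memcpy(iv, next_iv, BLOCK_SIZE);
		printf("next IV starts with %02x %02x\n", iv[0], iv[1]);
		return 0;
	}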
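The new cia_*_ecb/cbc hooks hand the hardware nbytes / AES_BLOCK_SIZE blocks in a single rep-prefixed instruction and report back how much input was consumed; returning nbytes & ~(AES_BLOCK_SIZE - 1) is simply nbytes rounded down to a whole number of 16-byte blocks, leaving any tail to the generic layer. A trivial check of that identity:

	#include <assert.h>
	#include <stdio.h>

	#define AES_BLOCK_SIZE 16

	int main(void)
	{
		unsigned int nbytes;

		for (nbytes = 0; nbytes < 64; nbytes++) {
			unsigned int blocks = nbytes / AES_BLOCK_SIZE;
			unsigned int done = nbytes & ~(AES_BLOCK_SIZE - 1u);

			assert(done == blocks * AES_BLOCK_SIZE);
		}
		printf("nbytes & ~(AES_BLOCK_SIZE - 1) == whole blocks processed\n");
		return 0;
	}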