Diffstat (limited to 'drivers/crypto/n2_core.c')
-rw-r--r--	drivers/crypto/n2_core.c | 542
1 file changed, 361 insertions, 181 deletions
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 23163fda503..7263c10a56e 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1,6 +1,6 @@
 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
  *
- * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
  */
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
@@ -31,10 +31,10 @@
 #include "n2_core.h"
 
 #define DRV_MODULE_NAME		"n2_crypto"
-#define DRV_MODULE_VERSION	"0.1"
-#define DRV_MODULE_RELDATE	"April 29, 2010"
+#define DRV_MODULE_VERSION	"0.2"
+#define DRV_MODULE_RELDATE	"July 28, 2011"
 
-static char version[] __devinitdata =
+static char version[] =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("Niagara2 Crypto driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-#define N2_CRA_PRIORITY		300
+#define N2_CRA_PRIORITY		200
 
 static DEFINE_MUTEX(spu_lock);
 
@@ -239,21 +239,57 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 }
 #endif
 
-struct n2_base_ctx {
-	struct list_head		list;
+struct n2_ahash_alg {
+	struct list_head	entry;
+	const char		*hash_zero;
+	const u32		*hash_init;
+	u8			hw_op_hashsz;
+	u8			digest_size;
+	u8			auth_type;
+	u8			hmac_type;
+	struct ahash_alg	alg;
 };
 
-static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
 {
-	INIT_LIST_HEAD(&ctx->list);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_ahash_alg, alg);
 }
 
-struct n2_hash_ctx {
-	struct n2_base_ctx		base;
+struct n2_hmac_alg {
+	const char		*child_alg;
+	struct n2_ahash_alg	derived;
+};
+
+static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
 
+	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
+}
+
+struct n2_hash_ctx {
 	struct crypto_ahash		*fallback_tfm;
 };
 
+#define N2_HASH_KEY_MAX			32 /* HW limit for all HMAC requests */
+
+struct n2_hmac_ctx {
+	struct n2_hash_ctx		base;
+
+	struct crypto_shash		*child_shash;
+
+	int				hash_key_len;
+	unsigned char			hash_key[N2_HASH_KEY_MAX];
+};
+
 struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
@@ -261,9 +297,6 @@ struct n2_hash_req_ctx {
 		struct sha256_state	sha256;
 	} u;
 
-	unsigned char			hash_key[64];
-	unsigned char			keyed_zero_hash[32];
-
 	struct ahash_request		fallback_req;
 };
 
@@ -323,7 +356,7 @@ static int n2_hash_async_finup(struct ahash_request *req)
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct crypto_ahash *fallback_tfm;
@@ -356,6 +389,94 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	crypto_free_ahash(ctx->fallback_tfm);
 }
 
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+	struct crypto_ahash *fallback_tfm;
+	struct crypto_shash *child_shash;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
+	if (IS_ERR(child_shash)) {
+		pr_warning("Child shash '%s' could not be loaded!\n",
+			   n2alg->child_alg);
+		err = PTR_ERR(child_shash);
+		goto out_free_fallback;
+	}
+
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->child_shash = child_shash;
+	ctx->base.fallback_tfm = fallback_tfm;
+	return 0;
+
+out_free_fallback:
+	crypto_free_ahash(fallback_tfm);
+
+out:
+	return err;
+}
+
+static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->base.fallback_tfm);
+	crypto_free_shash(ctx->child_shash);
+}
+
+static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct crypto_shash *child_shash = ctx->child_shash;
+	struct crypto_ahash *fallback_tfm;
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(child_shash)];
+	} desc;
+	int err, bs, ds;
+
+	fallback_tfm = ctx->base.fallback_tfm;
+	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
+	if (err)
+		return err;
+
+	desc.shash.tfm = child_shash;
+	desc.shash.flags = crypto_ahash_get_flags(tfm) &
+		CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	bs = crypto_shash_blocksize(child_shash);
+	ds = crypto_shash_digestsize(child_shash);
+	BUG_ON(ds > N2_HASH_KEY_MAX);
+	if (keylen > bs) {
+		err = crypto_shash_digest(&desc.shash, key, keylen,
+					  ctx->hash_key);
+		if (err)
+			return err;
+		keylen = ds;
+	} else if (keylen <= N2_HASH_KEY_MAX)
+		memcpy(ctx->hash_key, key, keylen);
+
+	ctx->hash_key_len = keylen;
+
+	return err;
+}
+
 static unsigned long wait_for_tail(struct spu_queue *qp)
 {
 	unsigned long head, hv_ret;
@@ -385,12 +506,12 @@ static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
 	return hv_ret;
 }
 
-static int n2_hash_async_digest(struct ahash_request *req,
-				unsigned int auth_type, unsigned int digest_size,
-				unsigned int result_size, void *hash_loc)
+static int n2_do_async_digest(struct ahash_request *req,
+			      unsigned int auth_type, unsigned int digest_size,
+			      unsigned int result_size, void *hash_loc,
+			      unsigned long auth_key, unsigned int auth_key_len)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cwq_initial_entry *ent;
 	struct crypto_hash_walk walk;
 	struct spu_queue *qp;
@@ -403,6 +524,7 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
 		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
 		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 		rctx->fallback_req.base.flags =
@@ -414,8 +536,6 @@ static int n2_hash_async_digest(struct ahash_request *req,
 		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
-	n2_base_ctx_init(&ctx->base);
-
 	nbytes = crypto_hash_walk_first(req, &walk);
 
 	cpu = get_cpu();
@@ -430,13 +550,13 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 */
 	ent = qp->q + qp->tail;
 
-	ent->control = control_word_base(nbytes, 0, 0,
+	ent->control = control_word_base(nbytes, auth_key_len, 0,
 					 auth_type, digest_size,
 					 false, true, false, false,
 					 OPCODE_INPLACE_BIT |
 					 OPCODE_AUTH_MAC);
 	ent->src_addr = __pa(walk.data);
-	ent->auth_key_addr = 0UL;
+	ent->auth_key_addr = auth_key;
 	ent->auth_iv_addr = __pa(hash_loc);
 	ent->final_auth_state_addr = 0UL;
 	ent->enc_key_addr = 0UL;
@@ -475,114 +595,55 @@ out:
 	return err;
 }
 
-static int n2_md5_async_digest(struct ahash_request *req)
+static int n2_hash_async_digest(struct ahash_request *req)
 {
+	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct md5_state *m = &rctx->u.md5;
+	int ds;
 
+	ds = n2alg->digest_size;
 	if (unlikely(req->nbytes == 0)) {
-		static const char md5_zero[MD5_DIGEST_SIZE] = {
-			0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
-			0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
-		};
-
-		memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+		memcpy(req->result, n2alg->hash_zero, ds);
 		return 0;
 	}
-	m->hash[0] = cpu_to_le32(0x67452301);
-	m->hash[1] = cpu_to_le32(0xefcdab89);
-	m->hash[2] = cpu_to_le32(0x98badcfe);
-	m->hash[3] = cpu_to_le32(0x10325476);
+	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
 
-	return n2_hash_async_digest(req, AUTH_TYPE_MD5,
-				    MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
-				    m->hash);
+	return n2_do_async_digest(req, n2alg->auth_type,
+				  n2alg->hw_op_hashsz, ds,
+				  &rctx->u, 0UL, 0);
 }
 
-static int n2_sha1_async_digest(struct ahash_request *req)
+static int n2_hmac_async_digest(struct ahash_request *req)
 {
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha1_state *s = &rctx->u.sha1;
-
-	if (unlikely(req->nbytes == 0)) {
-		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
-			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
-			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
-			0x07, 0x09
-		};
-
-		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA1_H0;
-	s->state[1] = SHA1_H1;
-	s->state[2] = SHA1_H2;
-	s->state[3] = SHA1_H3;
-	s->state[4] = SHA1_H4;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
-				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
-				    s->state);
-}
-
-static int n2_sha256_async_digest(struct ahash_request *req)
-{
-	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha256_state *s = &rctx->u.sha256;
-
-	if (req->nbytes == 0) {
-		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
-			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
-			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
-			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
-			0x1b, 0x78, 0x52, 0xb8, 0x55
-		};
-
-		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA256_H0;
-	s->state[1] = SHA256_H1;
-	s->state[2] = SHA256_H2;
-	s->state[3] = SHA256_H3;
-	s->state[4] = SHA256_H4;
-	s->state[5] = SHA256_H5;
-	s->state[6] = SHA256_H6;
-	s->state[7] = SHA256_H7;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
-				    s->state);
-}
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ds;
 
-static int n2_sha224_async_digest(struct ahash_request *req)
-{
-	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha256_state *s = &rctx->u.sha256;
+	ds = n2alg->derived.digest_size;
+	if (unlikely(req->nbytes == 0) ||
+	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	if (req->nbytes == 0) {
-		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
-			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
-			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
-			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
-			0x2f
-		};
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
-		return 0;
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
-	s->state[0] = SHA224_H0;
-	s->state[1] = SHA224_H1;
-	s->state[2] = SHA224_H2;
-	s->state[3] = SHA224_H3;
-	s->state[4] = SHA224_H4;
-	s->state[5] = SHA224_H5;
-	s->state[6] = SHA224_H6;
-	s->state[7] = SHA224_H7;
+	memcpy(&rctx->u, n2alg->derived.hash_init,
+	       n2alg->derived.hw_op_hashsz);
 
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
-				    s->state);
+	return n2_do_async_digest(req, n2alg->derived.hmac_type,
+				  n2alg->derived.hw_op_hashsz, ds,
+				  &rctx->u,
+				  __pa(&ctx->hash_key),
+				  ctx->hash_key_len);
 }
 
 struct n2_cipher_context {
@@ -945,9 +1006,9 @@ static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
 
 	spin_unlock_irqrestore(&qp->lock, flags);
 
+out:
 	put_cpu();
 
-out:
 	n2_chunk_complete(req, NULL);
 	return err;
 }
@@ -1035,9 +1096,9 @@ static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
 
 	spin_unlock_irqrestore(&qp->lock, flags);
 
+out:
 	put_cpu();
 
-out:
 	n2_chunk_complete(req, err ? NULL : final_iv_addr);
 	return err;
 }
@@ -1209,35 +1270,92 @@ static LIST_HEAD(cipher_algs);
 
 struct n2_hash_tmpl {
 	const char	*name;
-	int		(*digest)(struct ahash_request *req);
+	const char	*hash_zero;
+	const u32	*hash_init;
+	u8		hw_op_hashsz;
 	u8		digest_size;
 	u8		block_size;
+	u8		auth_type;
+	u8		hmac_type;
+};
+
+static const char md5_zero[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+static const u32 md5_init[MD5_HASH_WORDS] = {
+	cpu_to_le32(0x67452301),
+	cpu_to_le32(0xefcdab89),
+	cpu_to_le32(0x98badcfe),
+	cpu_to_le32(0x10325476),
 };
+static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+	0x07, 0x09
+};
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+	0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
 static const struct n2_hash_tmpl hash_tmpls[] = {
 	{ .name		= "md5",
-	  .digest	= n2_md5_async_digest,
+	  .hash_zero	= md5_zero,
+	  .hash_init	= md5_init,
+	  .auth_type	= AUTH_TYPE_MD5,
+	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
+	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
 	  .block_size	= MD5_HMAC_BLOCK_SIZE },
 	{ .name		= "sha1",
-	  .digest	= n2_sha1_async_digest,
+	  .hash_zero	= sha1_zero,
+	  .hash_init	= sha1_init,
+	  .auth_type	= AUTH_TYPE_SHA1,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
+	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
 	  .digest_size	= SHA1_DIGEST_SIZE,
 	  .block_size	= SHA1_BLOCK_SIZE },
 	{ .name		= "sha256",
-	  .digest	= n2_sha256_async_digest,
+	  .hash_zero	= sha256_zero,
+	  .hash_init	= sha256_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
 	  .digest_size	= SHA256_DIGEST_SIZE,
 	  .block_size	= SHA256_BLOCK_SIZE },
 	{ .name		= "sha224",
-	  .digest	= n2_sha224_async_digest,
+	  .hash_zero	= sha224_zero,
+	  .hash_init	= sha224_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_RESERVED,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
 	  .digest_size	= SHA224_DIGEST_SIZE,
 	  .block_size	= SHA224_BLOCK_SIZE },
 };
 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
 
-struct n2_ahash_alg {
-	struct list_head	entry;
-	struct ahash_alg	alg;
-};
 static LIST_HEAD(ahash_algs);
+static LIST_HEAD(hmac_algs);
 
 static int algs_registered;
@@ -1245,12 +1363,18 @@ static void __n2_unregister_algs(void)
 {
 	struct n2_cipher_alg *cipher, *cipher_tmp;
 	struct n2_ahash_alg *alg, *alg_tmp;
+	struct n2_hmac_alg *hmac, *hmac_tmp;
 
 	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
 		crypto_unregister_alg(&cipher->alg);
 		list_del(&cipher->entry);
 		kfree(cipher);
 	}
+	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
+		crypto_unregister_ahash(&hmac->derived.alg);
+		list_del(&hmac->derived.entry);
+		kfree(hmac);
+	}
 	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
 		crypto_unregister_ahash(&alg->alg);
 		list_del(&alg->entry);
@@ -1264,7 +1388,7 @@ static int n2_cipher_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
 {
 	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
 	struct crypto_alg *alg;
@@ -1278,7 +1402,8 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
 	alg->cra_priority = N2_CRA_PRIORITY;
-	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+			 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
 	alg->cra_blocksize = tmpl->block_size;
 	p->enc_type = tmpl->enc_type;
 	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
@@ -1290,13 +1415,54 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
 	list_add(&p->entry, &cipher_algs);
 	err = crypto_register_alg(alg);
 	if (err) {
+		pr_err("%s alg registration failed\n", alg->cra_name);
 		list_del(&p->entry);
 		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", alg->cra_name);
 	}
 	return err;
 }
 
-static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+{
+	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct ahash_alg *ahash;
+	struct crypto_alg *base;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	p->child_alg = n2ahash->alg.halg.base.cra_name;
+	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
+	INIT_LIST_HEAD(&p->derived.entry);
+
+	ahash = &p->derived.alg;
+	ahash->digest = n2_hmac_async_digest;
+	ahash->setkey = n2_hmac_async_setkey;
+
+	base = &ahash->halg.base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+
+	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
+	base->cra_init = n2_hmac_cra_init;
+	base->cra_exit = n2_hmac_cra_exit;
+
+	list_add(&p->derived.entry, &hmac_algs);
+	err = crypto_register_ahash(ahash);
+	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
+		list_del(&p->derived.entry);
+		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
+	}
+	return err;
+}
+
+static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 {
 	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
 	struct hash_alg_common *halg;
@@ -1307,12 +1473,19 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	if (!p)
 		return -ENOMEM;
 
+	p->hash_zero = tmpl->hash_zero;
+	p->hash_init = tmpl->hash_init;
+	p->auth_type = tmpl->auth_type;
+	p->hmac_type = tmpl->hmac_type;
+	p->hw_op_hashsz = tmpl->hw_op_hashsz;
+	p->digest_size = tmpl->digest_size;
+
 	ahash = &p->alg;
 	ahash->init = n2_hash_async_init;
 	ahash->update = n2_hash_async_update;
 	ahash->final = n2_hash_async_final;
 	ahash->finup = n2_hash_async_finup;
-	ahash->digest = tmpl->digest;
+	ahash->digest = n2_hash_async_digest;
 
 	halg = &ahash->halg;
 	halg->digestsize = tmpl->digest_size;
@@ -1321,7 +1494,9 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
 	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
 	base->cra_priority = N2_CRA_PRIORITY;
-	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
+	base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK;
 	base->cra_blocksize = tmpl->block_size;
 	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
 	base->cra_module = THIS_MODULE;
@@ -1331,13 +1506,18 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	list_add(&p->entry, &ahash_algs);
 	err = crypto_register_ahash(ahash);
 	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
 		list_del(&p->entry);
 		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
 	}
+	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
+		err = __n2_register_one_hmac(p);
 	return err;
 }
 
-static int __devinit n2_register_algs(void)
+static int n2_register_algs(void)
 {
 	int i, err = 0;
 
@@ -1365,7 +1545,7 @@ out:
 	return err;
 }
 
-static void __exit n2_unregister_algs(void)
+static void n2_unregister_algs(void)
 {
 	mutex_lock(&spu_lock);
 	if (!--algs_registered)
@@ -1375,7 +1555,7 @@ static void __exit n2_unregister_algs(void)
 
 /* To map CWQ queues to interrupt sources, the hypervisor API provides
  * a devino.  This isn't very useful to us because all of the
- * interrupts listed in the of_device node have been translated to
+ * interrupts listed in the device_node have been translated to
  * Linux virtual IRQ cookie numbers.
  *
  * So we have to back-translate, going through the 'intr' and 'ino'
@@ -1383,7 +1563,7 @@
  * 'interrupts' property entries, in order to to figure out which
  * devino goes to which already-translated IRQ.
  */
-static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
 			     unsigned long dev_ino)
 {
 	const unsigned int *dev_intrs;
@@ -1403,7 +1583,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
 	if (!dev_intrs)
 		return -ENODEV;
 
-	for (i = 0; i < dev->num_irqs; i++) {
+	for (i = 0; i < dev->archdata.num_irqs; i++) {
 		if (dev_intrs[i] == intr)
 			return i;
 	}
@@ -1411,7 +1591,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
 	return -ENODEV;
 }
 
-static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
 		       const char *irq_name, struct spu_queue *p,
 		       irq_handler_t handler)
 {
@@ -1426,12 +1606,11 @@ static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
 	if (index < 0)
 		return index;
 
-	p->irq = dev->irqs[index];
+	p->irq = dev->archdata.irqs[index];
 
 	sprintf(p->irq_name, "%s-%d", irq_name, index);
 
-	return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
-			   p->irq_name, p);
+	return request_irq(p->irq, handler, 0, p->irq_name, p);
 }
 
 static struct kmem_cache *queue_cache[2];
@@ -1559,7 +1738,7 @@ static void spu_list_destroy(struct list_head *list)
  * gathering cpu membership information.
  */
 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
-			       struct of_device *dev,
+			       struct platform_device *dev,
 			       u64 node, struct spu_queue *p,
 			       struct spu_queue **table)
 {
@@ -1586,7 +1765,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 
 /* Process an 'exec-unit' MDESC node of type 'cwq'.  */
 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
-			    struct of_device *dev, struct mdesc_handle *mdesc,
+			    struct platform_device *dev, struct mdesc_handle *mdesc,
 			    u64 node, const char *iname, unsigned long q_type,
 			    irq_handler_t handler, struct spu_queue **table)
 {
@@ -1617,7 +1796,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 	return spu_map_ino(dev, ip, iname, p, handler);
 }
 
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
 			  struct spu_mdesc_info *ip, struct list_head *list,
 			  const char *exec_name, unsigned long q_type,
 			  irq_handler_t handler, struct spu_queue **table)
@@ -1643,25 +1822,20 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
 	return err;
 }
 
-static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
-				   struct spu_mdesc_info *ip)
+static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
+			 struct spu_mdesc_info *ip)
 {
-	const u64 *intr, *ino;
-	int intr_len, ino_len;
+	const u64 *ino;
+	int ino_len;
 	int i;
 
-	intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
-	if (!intr)
-		return -ENODEV;
-
 	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
-	if (!intr)
+	if (!ino) {
+		printk("NO 'ino'\n");
 		return -ENODEV;
+	}
 
-	if (intr_len != ino_len)
-		return -EINVAL;
-
-	ip->num_intrs = intr_len / sizeof(u64);
+	ip->num_intrs = ino_len / sizeof(u64);
 	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
 				 ip->num_intrs),
 				GFP_KERNEL);
@@ -1670,17 +1844,17 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
 	for (i = 0; i < ip->num_intrs; i++) {
 		struct ino_blob *b = &ip->ino_table[i];
-		b->intr = intr[i];
+		b->intr = i + 1;
 		b->ino = ino[i];
 	}
 
 	return 0;
 }
 
-static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
-					  struct of_device *dev,
-					  struct spu_mdesc_info *ip,
-					  const char *node_name)
+static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+				struct platform_device *dev,
+				struct spu_mdesc_info *ip,
+				const char *node_name)
 {
 	const unsigned int *reg;
 	u64 node;
@@ -1709,7 +1883,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
 static unsigned long n2_spu_hvapi_major;
 static unsigned long n2_spu_hvapi_minor;
 
-static int __devinit n2_spu_hvapi_register(void)
+static int n2_spu_hvapi_register(void)
 {
 	int err;
 
@@ -1735,7 +1909,7 @@ static void n2_spu_hvapi_unregister(void)
 
 static int global_ref;
 
-static int __devinit grab_global_resources(void)
+static int grab_global_resources(void)
 {
 	int err = 0;
 
@@ -1799,7 +1973,7 @@ static void release_global_resources(void)
 	mutex_unlock(&spu_lock);
 }
 
-static struct n2_crypto * __devinit alloc_n2cp(void)
+static struct n2_crypto *alloc_n2cp(void)
 {
 	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
 
@@ -1819,7 +1993,7 @@ static void free_n2cp(struct n2_crypto *np)
 	kfree(np);
 }
 
-static void __devinit n2_spu_driver_version(void)
+static void n2_spu_driver_version(void)
 {
 	static int n2_spu_version_printed;
 
@@ -1827,8 +2001,7 @@ static void __devinit n2_spu_driver_version(void)
 		pr_info("%s", version);
 }
 
-static int __devinit n2_crypto_probe(struct of_device *dev,
-				     const struct of_device_id *match)
+static int n2_crypto_probe(struct platform_device *dev)
 {
 	struct mdesc_handle *mdesc;
 	const char *full_name;
@@ -1904,7 +2077,7 @@ out_free_n2cp:
 	return err;
 }
 
-static int __devexit n2_crypto_remove(struct of_device *dev)
+static int n2_crypto_remove(struct platform_device *dev)
 {
 	struct n2_crypto *np = dev_get_drvdata(&dev->dev);
 
@@ -1919,7 +2092,7 @@ static int __devexit n2_crypto_remove(struct of_device *dev)
 	return 0;
 }
 
-static struct n2_mau * __devinit alloc_ncp(void)
+static struct n2_mau *alloc_ncp(void)
 {
 	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
 
@@ -1939,8 +2112,7 @@ static void free_ncp(struct n2_mau *mp)
 	kfree(mp);
 }
 
-static int __devinit n2_mau_probe(struct of_device *dev,
-				  const struct of_device_id *match)
+static int n2_mau_probe(struct platform_device *dev)
 {
 	struct mdesc_handle *mdesc;
 	const char *full_name;
@@ -2007,7 +2179,7 @@ out_free_ncp:
 	return err;
 }
 
-static int __devexit n2_mau_remove(struct of_device *dev)
+static int n2_mau_remove(struct platform_device *dev)
 {
 	struct n2_mau *mp = dev_get_drvdata(&dev->dev);
 
@@ -2029,19 +2201,23 @@ static struct of_device_id n2_crypto_match[] = {
 		.name = "n2cp",
 		.compatible = "SUNW,vf-cwq",
 	},
+	{
+		.name = "n2cp",
+		.compatible = "SUNW,kt-cwq",
+	},
 	{},
 };
 
 MODULE_DEVICE_TABLE(of, n2_crypto_match);
 
-static struct of_platform_driver n2_crypto_driver = {
+static struct platform_driver n2_crypto_driver = {
 	.driver = {
 		.name		=	"n2cp",
 		.owner		=	THIS_MODULE,
 		.of_match_table	=	n2_crypto_match,
 	},
 	.probe		=	n2_crypto_probe,
-	.remove		=	__devexit_p(n2_crypto_remove),
+	.remove		=	n2_crypto_remove,
 };
 
 static struct of_device_id n2_mau_match[] = {
@@ -2053,37 +2229,41 @@ static struct of_device_id n2_mau_match[] = {
 		.name = "ncp",
 		.compatible = "SUNW,vf-mau",
 	},
+	{
+		.name = "ncp",
+		.compatible = "SUNW,kt-mau",
+	},
 	{},
 };
 
 MODULE_DEVICE_TABLE(of, n2_mau_match);
 
-static struct of_platform_driver n2_mau_driver = {
+static struct platform_driver n2_mau_driver = {
 	.driver = {
 		.name		=	"ncp",
 		.owner		=	THIS_MODULE,
 		.of_match_table	=	n2_mau_match,
 	},
 	.probe		=	n2_mau_probe,
-	.remove		=	__devexit_p(n2_mau_remove),
+	.remove		=	n2_mau_remove,
 };
 
 static int __init n2_init(void)
 {
-	int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+	int err = platform_driver_register(&n2_crypto_driver);
 
 	if (!err) {
-		err = of_register_driver(&n2_mau_driver, &of_bus_type);
+		err = platform_driver_register(&n2_mau_driver);
 		if (err)
-			of_unregister_driver(&n2_crypto_driver);
+			platform_driver_unregister(&n2_crypto_driver);
 	}
 
 	return err;
 }
 
 static void __exit n2_exit(void)
 {
-	of_unregister_driver(&n2_mau_driver);
-	of_unregister_driver(&n2_crypto_driver);
+	platform_driver_unregister(&n2_mau_driver);
+	platform_driver_unregister(&n2_crypto_driver);
 }
 
 module_init(n2_init);