author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:10:50 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:10:50 -0800
commit    36289a03bcd3aabdf66de75cb6d1b4ee15726438 (patch)
tree      1230c6391678f9255f74d7a4f65e95ea8a39d452 /crypto
parent    69308402ca6f5b80a5a090ade0b13bd146891420 (diff)
parent    8b84475318641c2b89320859332544cf187e1cbd (diff)
Merge tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:

 "API:
   - Use kmap_local instead of kmap_atomic
   - Change request callback to take void pointer
   - Print FIPS status in /proc/crypto (when enabled)

  Algorithms:
   - Add rfc4106/gcm support on arm64
   - Add ARIA AVX2/512 support on x86

  Drivers:
   - Add TRNG driver for StarFive SoC
   - Delete ux500/hash driver (subsumed by stm32/hash)
   - Add zlib support in qat
   - Add RSA support in aspeed"

* tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (156 commits)
  crypto: x86/aria-avx - Do not use avx2 instructions
  crypto: aspeed - Fix modular aspeed-acry
  crypto: hisilicon/qm - fix coding style issues
  crypto: hisilicon/qm - update comments to match function
  crypto: hisilicon/qm - change function names
  crypto: hisilicon/qm - use min() instead of min_t()
  crypto: hisilicon/qm - remove some unused defines
  crypto: proc - Print fips status
  crypto: crypto4xx - Call dma_unmap_page when done
  crypto: octeontx2 - Fix objects shared between several modules
  crypto: nx - Fix sparse warnings
  crypto: ecc - Silence sparse warning
  tls: Pass rec instead of aead_req into tls_encrypt_done
  crypto: api - Remove completion function scaffolding
  tls: Remove completion function scaffolding
  tipc: Remove completion function scaffolding
  net: ipv6: Remove completion function scaffolding
  net: ipv4: Remove completion function scaffolding
  net: macsec: Remove completion function scaffolding
  dm: Remove completion function scaffolding
  ...
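The API change that touches nearly every hunk below is the completion
callback signature: callbacks now receive their context as a bare void
pointer instead of a struct crypto_async_request whose ->data field had
to be dereferenced. A minimal before/after sketch of the pattern, with
hypothetical function names (the real conversions, e.g.
adiantum_streamcipher_done(), are in the diff itself):

    /* Before v6.3: the callback got the async request and had to
     * fish its own context out of areq->data. */
    static void example_done_old(struct crypto_async_request *areq, int err)
    {
            struct skcipher_request *req = areq->data;
            /* ... finish processing req, propagate err ... */
    }

    /* After this merge: the context pointer is passed directly, so
     * struct crypto_async_request no longer leaks into API users. */
    static void example_done_new(void *data, int err)
    {
            struct skcipher_request *req = data;
            /* ... finish processing req, propagate err ... */
    }

Completions are reported through helpers such as crypto_request_complete()
and the per-type wrappers (ahash_request_complete(), aead_request_complete(),
skcipher_request_complete()), which hand req->data to the callback.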
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/adiantum.c            5
-rw-r--r--  crypto/af_alg.c              6
-rw-r--r--  crypto/ahash.c             195
-rw-r--r--  crypto/api.c                 4
-rw-r--r--  crypto/aria_generic.c        4
-rw-r--r--  crypto/authenc.c            14
-rw-r--r--  crypto/authencesn.c         15
-rw-r--r--  crypto/ccm.c                 9
-rw-r--r--  crypto/chacha20poly1305.c   40
-rw-r--r--  crypto/cryptd.c            290
-rw-r--r--  crypto/crypto_engine.c       8
-rw-r--r--  crypto/cts.c                12
-rw-r--r--  crypto/dh.c                  5
-rw-r--r--  crypto/ecc.c                 6
-rw-r--r--  crypto/essiv.c              15
-rw-r--r--  crypto/gcm.c                36
-rw-r--r--  crypto/hctr2.c               5
-rw-r--r--  crypto/lrw.c                 4
-rw-r--r--  crypto/pcrypt.c              4
-rw-r--r--  crypto/proc.c                6
-rw-r--r--  crypto/rsa-pkcs1pad.c       51
-rw-r--r--  crypto/seqiv.c               7
-rw-r--r--  crypto/shash.c               4
-rw-r--r--  crypto/skcipher.c           22
-rw-r--r--  crypto/tcrypt.c              8
-rw-r--r--  crypto/tcrypt.h              2
-rw-r--r--  crypto/testmgr.c            16
-rw-r--r--  crypto/wp512.c               2
-rw-r--r--  crypto/xts.c                20
29 files changed, 400 insertions, 415 deletions
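The other mechanical conversion named in the pull description shows up in
the crypto/ahash.c hunks below: the hash walk switches from the atomic
kmap API to the local one. A sketch of the idiom under a hypothetical
helper name (hash_one_page() is not in the tree; the real change is in
hash_walk_next() and crypto_hash_walk_done()):

    #include <linux/highmem.h>

    /* kmap_atomic() disables preemption for the whole mapping window;
     * kmap_local_page() only keeps the mapping valid on the local CPU,
     * which is all a page-at-a-time hash walk needs. */
    static void hash_one_page(struct page *pg, unsigned int offset,
                              unsigned int nbytes)
    {
            u8 *vaddr = kmap_local_page(pg);   /* was: kmap_atomic(pg) */

            /* ... feed vaddr + offset, nbytes bytes into the digest ... */

            kunmap_local(vaddr);               /* was: kunmap_atomic(vaddr) */
    }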
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 84450130cb6b..c33ba22a6638 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -308,10 +308,9 @@ static int adiantum_finish(struct skcipher_request *req)
return 0;
}
-static void adiantum_streamcipher_done(struct crypto_async_request *areq,
- int err)
+static void adiantum_streamcipher_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err)
err = adiantum_finish(req);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 0a4fa2a429e2..5f7252a5b7b4 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1186,7 +1186,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
* af_alg_async_cb - AIO callback handler
- * @_req: async request info
+ * @data: async request completion data
* @err: if non-zero, error result to be returned via ki_complete();
* otherwise return the AIO output length via ki_complete().
*
@@ -1196,9 +1196,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
* The number of bytes to be generated with the AIO operation must be set
* in areq->outlen before the AIO callback handler is invoked.
*/
-void af_alg_async_cb(struct crypto_async_request *_req, int err)
+void af_alg_async_cb(void *data, int err)
{
- struct af_alg_async_req *areq = _req->data;
+ struct af_alg_async_req *areq = data;
struct sock *sk = areq->sk;
struct kiocb *iocb = areq->iocb;
unsigned int resultlen;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2ca631a111f..ff8c79d975c1 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -45,7 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
- walk->data = kmap_atomic(walk->pg);
+ walk->data = kmap_local_page(walk->pg);
walk->data += offset;
if (offset & alignmask) {
@@ -95,7 +95,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
}
}
- kunmap_atomic(walk->data);
+ kunmap_local(walk->data);
crypto_yield(walk->flags);
if (err)
@@ -190,133 +190,98 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static inline unsigned int ahash_align_buffer_size(unsigned len,
- unsigned long mask)
-{
- return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
-}
-
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
+ bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
+ struct ahash_request *subreq;
+ unsigned int subreq_size;
+ unsigned int reqsize;
+ u8 *result;
+ gfp_t gfp;
+ u32 flags;
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
+ subreq_size = sizeof(*subreq);
+ reqsize = crypto_ahash_reqsize(tfm);
+ reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
+ subreq_size += reqsize;
+ subreq_size += ds;
+ subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+
+ flags = ahash_request_flags(req);
+ gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
+ subreq = kmalloc(subreq_size, gfp);
+ if (!subreq)
return -ENOMEM;
- /*
- * WARNING: Voodoo programming below!
- *
- * The code below is obscure and hard to understand, thus explanation
- * is necessary. See include/crypto/hash.h and include/linux/crypto.h
- * to understand the layout of structures used here!
- *
- * The code here will replace portions of the ORIGINAL request with
- * pointers to new code and buffers so the hashing operation can store
- * the result in aligned buffer. We will call the modified request
- * an ADJUSTED request.
- *
- * The newly mangled request will look as such:
- *
- * req {
- * .result = ADJUSTED[new aligned buffer]
- * .base.complete = ADJUSTED[pointer to completion function]
- * .base.data = ADJUSTED[*req (pointer to self)]
- * .priv = ADJUSTED[new priv] {
- * .result = ORIGINAL(result)
- * .complete = ORIGINAL(base.complete)
- * .data = ORIGINAL(base.data)
- * }
- */
-
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
- priv->flags = req->base.flags;
-
- /*
- * WARNING: We do not backup req->priv here! The req->priv
- * is for internal use of the Crypto API and the
- * user must _NOT_ _EVER_ depend on it's content!
- */
-
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = cplt;
- req->base.data = req;
- req->priv = priv;
+ ahash_request_set_tfm(subreq, tfm);
+ ahash_request_set_callback(subreq, flags, cplt, req);
+
+ result = (u8 *)(subreq + 1) + reqsize;
+ result = PTR_ALIGN(result, alignmask + 1);
+
+ ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
+
+ if (has_state) {
+ void *state;
+
+ state = kmalloc(crypto_ahash_statesize(tfm), gfp);
+ if (!state) {
+ kfree(subreq);
+ return -ENOMEM;
+ }
+
+ crypto_ahash_export(req, state);
+ crypto_ahash_import(subreq, state);
+ kfree_sensitive(state);
+ }
+
+ req->priv = subreq;
return 0;
}
static void ahash_restore_req(struct ahash_request *req, int err)
{
- struct ahash_request_priv *priv = req->priv;
+ struct ahash_request *subreq = req->priv;
if (!err)
- memcpy(priv->result, req->result,
+ memcpy(req->result, subreq->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
- /* Restore the original crypto request. */
- req->result = priv->result;
-
- ahash_request_set_callback(req, priv->flags,
- priv->complete, priv->data);
req->priv = NULL;
- /* Free the req->priv.priv from the ADJUSTED request. */
- kfree_sensitive(priv);
-}
-
-static void ahash_notify_einprogress(struct ahash_request *req)
-{
- struct ahash_request_priv *priv = req->priv;
- struct crypto_async_request oreq;
-
- oreq.data = priv->data;
-
- priv->complete(&oreq, -EINPROGRESS);
+ kfree_sensitive(subreq);
}
-static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+static void ahash_op_unaligned_done(void *data, int err)
{
- struct ahash_request *areq = req->data;
-
- if (err == -EINPROGRESS) {
- ahash_notify_einprogress(areq);
- return;
- }
+ struct ahash_request *areq = data;
- /*
- * Restore the original request, see ahash_op_unaligned() for what
- * goes where.
- *
- * The "struct ahash_request *req" here is in fact the "req.base"
- * from the ADJUSTED request from ahash_op_unaligned(), thus as it
- * is a pointer to self, it is also the ADJUSTED "req" .
- */
+ if (err == -EINPROGRESS)
+ goto out;
/* First copy req->result into req->priv.result */
ahash_restore_req(areq, err);
+out:
/* Complete the ORIGINAL request. */
- areq->base.complete(&areq->base, err);
+ ahash_request_complete(areq, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
- int (*op)(struct ahash_request *))
+ int (*op)(struct ahash_request *),
+ bool has_state)
{
int err;
- err = ahash_save_req(req, ahash_op_unaligned_done);
+ err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
if (err)
return err;
- err = op(req);
+ err = op(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
@@ -326,13 +291,14 @@ static int ahash_op_unaligned(struct ahash_request *req,
}
static int crypto_ahash_op(struct ahash_request *req,
- int (*op)(struct ahash_request *))
+ int (*op)(struct ahash_request *),
+ bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)req->result & alignmask)
- return ahash_op_unaligned(req, op);
+ return ahash_op_unaligned(req, op, has_state);
return op(req);
}
@@ -345,7 +311,7 @@ int crypto_ahash_final(struct ahash_request *req)
int ret;
crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
@@ -359,7 +325,7 @@ int crypto_ahash_finup(struct ahash_request *req)
int ret;
crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
@@ -376,32 +342,34 @@ int crypto_ahash_digest(struct ahash_request *req)
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_ahash_op(req, tfm->digest);
+ ret = crypto_ahash_op(req, tfm->digest, false);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+static void ahash_def_finup_done2(void *data, int err)
{
- struct ahash_request *areq = req->data;
+ struct ahash_request *areq = data;
if (err == -EINPROGRESS)
return;
ahash_restore_req(areq, err);
- areq->base.complete(&areq->base, err);
+ ahash_request_complete(areq, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
+ struct ahash_request *subreq = req->priv;
+
if (err)
goto out;
- req->base.complete = ahash_def_finup_done2;
+ subreq->base.complete = ahash_def_finup_done2;
- err = crypto_ahash_reqtfm(req)->final(req);
+ err = crypto_ahash_reqtfm(req)->final(subreq);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
@@ -410,22 +378,23 @@ out:
return err;
}
-static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_request *areq = req->data;
+ struct ahash_request *areq = data;
+ struct ahash_request *subreq;
- if (err == -EINPROGRESS) {
- ahash_notify_einprogress(areq);
- return;
- }
+ if (err == -EINPROGRESS)
+ goto out;
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ subreq = areq->priv;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = ahash_def_finup_finish1(areq, err);
- if (areq->priv)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
- areq->base.complete(&areq->base, err);
+out:
+ ahash_request_complete(areq, err);
}
static int ahash_def_finup(struct ahash_request *req)
@@ -433,11 +402,11 @@ static int ahash_def_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
int err;
- err = ahash_save_req(req, ahash_def_finup_done1);
+ err = ahash_save_req(req, ahash_def_finup_done1, true);
if (err)
return err;
- err = tfm->update(req);
+ err = tfm->update(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
diff --git a/crypto/api.c b/crypto/api.c
index b022702f6436..e67cc63368ed 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -643,9 +643,9 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
-void crypto_req_done(struct crypto_async_request *req, int err)
+void crypto_req_done(void *data, int err)
{
- struct crypto_wait *wait = req->data;
+ struct crypto_wait *wait = data;
if (err == -EINPROGRESS)
return;
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index 4cc29b82b99d..d96dfc4fdde6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -178,6 +178,10 @@ int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
+ BUILD_BUG_ON(sizeof(ctx->enc_key) != 272);
+ BUILD_BUG_ON(sizeof(ctx->dec_key) != 272);
+ BUILD_BUG_ON(sizeof(int) != sizeof(ctx->rounds));
+
ctx->key_length = key_len;
ctx->rounds = (key_len + 32) / 4;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 17f674a7cdff..3326c7343e86 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -109,9 +109,9 @@ out:
return err;
}
-static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
+static void authenc_geniv_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
@@ -160,10 +160,9 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
return 0;
}
-static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
- int err)
+static void crypto_authenc_encrypt_done(void *data, int err)
{
- struct aead_request *areq = req->data;
+ struct aead_request *areq = data;
if (err)
goto out;
@@ -261,10 +260,9 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
return crypto_skcipher_decrypt(skreq);
}
-static void authenc_verify_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_verify_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index b60e61b1904c..91424e791d5c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -107,10 +107,9 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
return 0;
}
-static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_esn_geniv_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
aead_request_complete(req, err);
@@ -153,10 +152,9 @@ static int crypto_authenc_esn_genicv(struct aead_request *req,
}
-static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
- int err)
+static void crypto_authenc_esn_encrypt_done(void *data, int err)
{
- struct aead_request *areq = req->data;
+ struct aead_request *areq = data;
if (!err)
err = crypto_authenc_esn_genicv(areq, 0);
@@ -258,10 +256,9 @@ decrypt:
return crypto_skcipher_decrypt(skreq);
}
-static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_esn_verify_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
authenc_esn_request_complete(req, err);
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 30dbae72728f..a9453129c51c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -224,9 +224,9 @@ out:
return err;
}
-static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_ccm_encrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
u8 *odata = pctx->odata;
@@ -320,10 +320,9 @@ static int crypto_ccm_encrypt(struct aead_request *req)
return err;
}
-static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
- int err)
+static void crypto_ccm_decrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 97bbb135e9a6..3a905c5d8f53 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -115,9 +115,9 @@ static int poly_copy_tag(struct aead_request *req)
return 0;
}
-static void chacha_decrypt_done(struct crypto_async_request *areq, int err)
+static void chacha_decrypt_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_verify_tag);
+ async_done_continue(data, err, poly_verify_tag);
}
static int chacha_decrypt(struct aead_request *req)
@@ -161,9 +161,9 @@ static int poly_tail_continue(struct aead_request *req)
return chacha_decrypt(req);
}
-static void poly_tail_done(struct crypto_async_request *areq, int err)
+static void poly_tail_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_tail_continue);
+ async_done_continue(data, err, poly_tail_continue);
}
static int poly_tail(struct aead_request *req)
@@ -191,9 +191,9 @@ static int poly_tail(struct aead_request *req)
return poly_tail_continue(req);
}
-static void poly_cipherpad_done(struct crypto_async_request *areq, int err)
+static void poly_cipherpad_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_tail);
+ async_done_continue(data, err, poly_tail);
}
static int poly_cipherpad(struct aead_request *req)
@@ -220,9 +220,9 @@ static int poly_cipherpad(struct aead_request *req)
return poly_tail(req);
}
-static void poly_cipher_done(struct crypto_async_request *areq, int err)
+static void poly_cipher_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_cipherpad);
+ async_done_continue(data, err, poly_cipherpad);
}
static int poly_cipher(struct aead_request *req)
@@ -250,9 +250,9 @@ static int poly_cipher(struct aead_request *req)
return poly_cipherpad(req);
}
-static void poly_adpad_done(struct crypto_async_request *areq, int err)
+static void poly_adpad_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_cipher);
+ async_done_continue(data, err, poly_cipher);
}
static int poly_adpad(struct aead_request *req)
@@ -279,9 +279,9 @@ static int poly_adpad(struct aead_request *req)
return poly_cipher(req);
}
-static void poly_ad_done(struct crypto_async_request *areq, int err)
+static void poly_ad_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_adpad);
+ async_done_continue(data, err, poly_adpad);
}
static int poly_ad(struct aead_request *req)
@@ -303,9 +303,9 @@ static int poly_ad(struct aead_request *req)
return poly_adpad(req);
}
-static void poly_setkey_done(struct crypto_async_request *areq, int err)
+static void poly_setkey_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_ad);
+ async_done_continue(data, err, poly_ad);
}
static int poly_setkey(struct aead_request *req)
@@ -329,9 +329,9 @@ static int poly_setkey(struct aead_request *req)
return poly_ad(req);
}
-static void poly_init_done(struct crypto_async_request *areq, int err)
+static void poly_init_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_setkey);
+ async_done_continue(data, err, poly_setkey);
}
static int poly_init(struct aead_request *req)
@@ -352,9 +352,9 @@ static int poly_init(struct aead_request *req)
return poly_setkey(req);
}
-static void poly_genkey_done(struct crypto_async_request *areq, int err)
+static void poly_genkey_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_init);
+ async_done_continue(data, err, poly_init);
}
static int poly_genkey(struct aead_request *req)
@@ -391,9 +391,9 @@ static int poly_genkey(struct aead_request *req)
return poly_init(req);
}
-static void chacha_encrypt_done(struct crypto_async_request *areq, int err)
+static void chacha_encrypt_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_genkey);
+ async_done_continue(data, err, poly_genkey);
}
static int chacha_encrypt(struct aead_request *req)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ca3a40fc7da9..37365ed30b38 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -72,7 +72,6 @@ struct cryptd_skcipher_ctx {
};
struct cryptd_skcipher_request_ctx {
- crypto_completion_t complete;
struct skcipher_request req;
};
@@ -83,6 +82,7 @@ struct cryptd_hash_ctx {
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ void *data;
struct shash_desc desc;
};
@@ -92,7 +92,7 @@ struct cryptd_aead_ctx {
};
struct cryptd_aead_request_ctx {
- crypto_completion_t complete;
+ struct aead_request req;
};
static void cryptd_queue_worker(struct work_struct *work);
@@ -177,8 +177,8 @@ static void cryptd_queue_worker(struct work_struct *work)
return;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
+ crypto_request_complete(backlog, -EINPROGRESS);
+ crypto_request_complete(req, 0);
if (cpu_queue->queue.qlen)
queue_work(cryptd_wq, &cpu_queue->work);
@@ -237,33 +237,22 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
return crypto_skcipher_setkey(child, key, keylen);
}
-static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
- int refcnt = refcount_read(&ctx->refcnt);
-
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
- crypto_free_skcipher(tfm);
-}
-
-static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
- int err)
+static struct skcipher_request *cryptd_skcipher_prepare(
+ struct skcipher_request *req, int err)
{
- struct skcipher_request *req = skcipher_request_cast(base);
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->req;
- struct crypto_skcipher *child = ctx->child;
+ struct cryptd_skcipher_ctx *ctx;
+ struct crypto_skcipher *child;
+
+ req->base.complete = subreq->base.complete;
+ req->base.data = subreq->base.data;
if (unlikely(err == -EINPROGRESS))
- goto out;
+ return NULL;
+
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ child = ctx->child;
skcipher_request_set_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -271,41 +260,53 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
-
- req->base.complete = rctx->complete;
-
-out:
- cryptd_skcipher_complete(req, err);
+ return subreq;
}
-static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
- int err)
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
+ crypto_completion_t complete)
{
- struct skcipher_request *req = skcipher_request_cast(base);
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->req;
- struct crypto_skcipher *child = ctx->child;
+ int refcnt = refcount_read(&ctx->refcnt);
- if (unlikely(err == -EINPROGRESS))
- goto out;
+ local_bh_disable();
+ skcipher_request_complete(req, err);
+ local_bh_enable();
- skcipher_request_set_tfm(subreq, child);
- skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
+ if (unlikely(err == -EINPROGRESS)) {
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
+ req->base.complete = complete;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(tfm);
+}
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+static void cryptd_skcipher_encrypt(void *data, int err)
+{
+ struct skcipher_request *req = data;
+ struct skcipher_request *subreq;
- req->base.complete = rctx->complete;
+ subreq = cryptd_skcipher_prepare(req, err);
+ if (likely(subreq))
+ err = crypto_skcipher_encrypt(subreq);
-out:
- cryptd_skcipher_complete(req, err);
+ cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
+}
+
+static void cryptd_skcipher_decrypt(void *data, int err)
+{
+ struct skcipher_request *req = data;
+ struct skcipher_request *subreq;
+
+ subreq = cryptd_skcipher_prepare(req, err);
+ if (likely(subreq))
+ err = crypto_skcipher_decrypt(subreq);
+
+ cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
@@ -313,11 +314,14 @@ static int cryptd_skcipher_enqueue(struct skcipher_request *req,
{
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);