author     Linus Torvalds <torvalds@linux-foundation.org>   2023-04-26 08:32:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-04-26 08:32:52 -0700
commit     733f7e9c18c5e377025c1bfdce6bc9a7d55649be (patch)
tree       19adc4c70522756ef682181d58b231005fed5a32 /crypto
parent     98f99e67a1dc456e9a542584819b2aa265ffc737 (diff)
parent     482c84e906e535072c55395acabd3a58e9443d12 (diff)
Merge tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Total usage stats now include all that returned errors (instead of
     just some)
   - Remove maximum hash statesize limit
   - Add cloning support for hmac and unkeyed hashes
   - Demote BUG_ON in crypto_unregister_alg to a WARN_ON

  Algorithms:
   - Use RIP-relative addressing on x86 to prepare for PIE build
   - Add accelerated AES/GCM stitched implementation on powerpc P10
   - Add some test vectors for cmac(camellia)
   - Remove failure case where jent is unavailable outside of FIPS mode
     in drbg
   - Add permanent and intermittent health error checks in jitter RNG

  Drivers:
   - Add support for 402xx devices in qat
   - Add support for HiSTB TRNG
   - Fix hash concurrency issues in stm32
   - Add OP-TEE firmware support in caam"

* tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (139 commits)
  i2c: designware: Add doorbell support for Mendocino
  i2c: designware: Use PCI PSP driver for communication
  powerpc: Move Power10 feature PPC_MODULE_FEATURE_P10
  crypto: p10-aes-gcm - Remove POWER10_CPU dependency
  crypto: testmgr - Add some test vectors for cmac(camellia)
  crypto: cryptd - Add support for cloning hashes
  crypto: cryptd - Convert hash to use modern init_tfm/exit_tfm
  crypto: hmac - Add support for cloning
  crypto: hash - Add crypto_clone_ahash/shash
  crypto: api - Add crypto_clone_tfm
  crypto: api - Add crypto_tfm_get
  crypto: x86/sha - Use local .L symbols for code
  crypto: x86/crc32 - Use local .L symbols for code
  crypto: x86/aesni - Use local .L symbols for code
  crypto: x86/sha256 - Use RIP-relative addressing
  crypto: x86/ghash - Use RIP-relative addressing
  crypto: x86/des3 - Use RIP-relative addressing
  crypto: x86/crc32c - Use RIP-relative addressing
  crypto: x86/cast6 - Use RIP-relative addressing
  crypto: x86/cast5 - Use RIP-relative addressing
  ...
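As context for the hash-cloning entries above (crypto: hash - Add crypto_clone_ahash/shash, crypto: api - Add crypto_clone_tfm, crypto: hmac - Add support for cloning): the new API duplicates an already-keyed transform so the copy can be used independently without the caller re-supplying the key. The following is a minimal caller-side sketch, not code from this merge; the function name clone_keyed_hmac_example and the choice of hmac(sha256) are illustrative only.

/*
 * Illustrative sketch (not part of this merge): clone a keyed hmac
 * transform with the new crypto_clone_ahash() API so the clone can be
 * used from another context without access to the key material.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int clone_keyed_hmac_example(const u8 *key, unsigned int keylen)
{
	struct crypto_ahash *orig, *copy;
	int err;

	orig = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(orig))
		return PTR_ERR(orig);

	err = crypto_ahash_setkey(orig, key, keylen);
	if (err)
		goto out_free_orig;

	/* New in this merge: duplicate the tfm, including its key state. */
	copy = crypto_clone_ahash(orig);
	if (IS_ERR(copy)) {
		err = PTR_ERR(copy);
		goto out_free_orig;
	}

	/* ... hand "copy" to another user, then release both ... */
	crypto_free_ahash(copy);
	err = 0;

out_free_orig:
	crypto_free_ahash(orig);
	return err;
}

Unkeyed hashes take a cheaper path: as the crypto/ahash.c hunk below shows, crypto_clone_ahash() simply takes a reference with crypto_tfm_get() and returns the original transform when the algorithm has no setkey.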
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/acompress.c              81
-rw-r--r--  crypto/aead.c                   98
-rw-r--r--  crypto/ahash.c                 144
-rw-r--r--  crypto/akcipher.c               52
-rw-r--r--  crypto/algapi.c                219
-rw-r--r--  crypto/algif_hash.c             19
-rw-r--r--  crypto/api.c                    63
-rw-r--r--  crypto/async_tx/async_pq.c      10
-rw-r--r--  crypto/async_tx/async_tx.c       4
-rw-r--r--  crypto/compress.h               26
-rw-r--r--  crypto/cryptd.c                 34
-rw-r--r--  crypto/crypto_user_stat.c      183
-rw-r--r--  crypto/drbg.c                    2
-rw-r--r--  crypto/fips.c                   11
-rw-r--r--  crypto/hash.h                   40
-rw-r--r--  crypto/hmac.c                   15
-rw-r--r--  crypto/internal.h               10
-rw-r--r--  crypto/jitterentropy-kcapi.c    51
-rw-r--r--  crypto/jitterentropy.c         144
-rw-r--r--  crypto/jitterentropy.h           1
-rw-r--r--  crypto/kpp.c                    53
-rw-r--r--  crypto/rng.c                    65
-rw-r--r--  crypto/scompress.c              39
-rw-r--r--  crypto/shash.c                 181
-rw-r--r--  crypto/skcipher.c              113
-rw-r--r--  crypto/tcrypt.c                 11
-rw-r--r--  crypto/testmgr.c               272
-rw-r--r--  crypto/testmgr.h                47
28 files changed, 1137 insertions, 851 deletions
diff --git a/crypto/acompress.c b/crypto/acompress.c
index c32c72048a1c..82a290df2822 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -6,25 +6,35 @@
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
+
+#include <crypto/internal/acompress.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
+
+struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
-#ifdef CONFIG_NET
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct acomp_alg, calg.base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static int __maybe_unused crypto_acomp_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
@@ -34,12 +44,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
-#else
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -89,13 +93,44 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
+static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
+ struct crypto_istat_compress *istat = comp_get_stat(calg);
+ struct crypto_stat_compress racomp;
+
+ memset(&racomp, 0, sizeof(racomp));
+
+ strscpy(racomp.type, "acomp", sizeof(racomp.type));
+ racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
+ racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
+ racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
+ racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
+ racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
+}
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return __crypto_acomp_report_stat(skb, alg);
+}
+#endif
+
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_acomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
@@ -147,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
}
EXPORT_SYMBOL_GPL(acomp_request_free);
-int crypto_register_acomp(struct acomp_alg *alg)
+void comp_prepare_alg(struct comp_alg_common *alg)
{
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
- base->cra_type = &crypto_acomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
+
+ base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
diff --git a/crypto/aead.c b/crypto/aead.c
index 16991095270d..ffc48a7dfb34 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -8,17 +8,27 @@
*/
#include <crypto/internal/aead.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -80,39 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
int ret;
- crypto_stats_get(alg);
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stats_aead_encrypt(cryptlen, alg, ret);
- return ret;
+ ret = alg->encrypt(req);
+
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_alg *alg = aead->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
int ret;
- crypto_stats_get(alg);
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
- ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stats_aead_decrypt(cryptlen, alg, ret);
- return ret;
+ ret = alg->decrypt(req);
+
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@@ -142,8 +175,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-#ifdef CONFIG_NET
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_aead_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
@@ -159,12 +192,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
-#else
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -188,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
aead->free(aead);
}
+static int __maybe_unused crypto_aead_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+ struct crypto_istat_aead *istat = aead_get_stat(aead);
+ struct crypto_stat_aead raead;
+
+ memset(&raead, 0, sizeof(raead));
+
+ strscpy(raead.type, "aead", sizeof(raead.type));
+
+ raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
@@ -195,7 +242,12 @@ static const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_aead_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_aead_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
@@ -219,6 +271,7 @@ EXPORT_SYMBOL_GPL(crypto_alloc_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
+ struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -232,6 +285,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ff8c79d975c1..b8a607928e72 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -8,19 +8,18 @@
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/string.h>
#include <net/netlink.h>
-#include "internal.h"
+#include "hash.h"
static const struct crypto_type crypto_ahash_type;
@@ -296,55 +295,60 @@ static int crypto_ahash_op(struct ahash_request *req,
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)req->result & alignmask)
- return ahash_op_unaligned(req, op, has_state);
+ err = ahash_op_unaligned(req, op, has_state);
+ else
+ err = op(req);
- return op(req);
+ return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&hash_get_stat(alg)->hash_cnt);
+
+ return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
- crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
+
+ return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
+ struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
- crypto_stats_get(alg);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- ret = -ENOKEY;
- else
- ret = crypto_ahash_op(req, tfm->digest, false);
- crypto_stats_ahash_final(nbytes, ret, alg);
- return ret;
+ return crypto_hash_errstat(alg, -ENOKEY);
+
+ return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -465,8 +469,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst)
ahash->free(ahash);
}
-#ifdef CONFIG_NET
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_ahash_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
@@ -479,12 +483,6 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
-#else
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -498,6 +496,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
+static int __maybe_unused crypto_ahash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "ahash");
+}
+
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
@@ -505,7 +509,12 @@ static const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_ahash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_ahash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
@@ -534,17 +543,70 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
+{
+ struct hash_alg_common *halg = crypto_hash_alg_common(hash);
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *nhash;
+ struct ahash_alg *alg;
+ int err;
+
+ if (!crypto_hash_alg_has_setkey(halg)) {
+ tfm = crypto_tfm_get(tfm);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ return hash;
+ }
+
+ nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
+
+ if (IS_ERR(nhash))
+ return nhash;
+
+ nhash->init = hash->init;
+ nhash->update = hash->update;
+ nhash->final = hash->final;
+ nhash->finup = hash->finup;
+ nhash->digest = hash->digest;
+ nhash->export = hash->export;
+ nhash->import = hash->import;
+ nhash->setkey = hash->setkey;
+ nhash->reqsize = hash->reqsize;
+
+ if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+ return crypto_clone_shash_ops_async(nhash, hash);
+
+ err = -ENOSYS;
+ alg = crypto_ahash_alg(hash);
+ if (!alg->clone_tfm)
+ goto out_free_nhash;
+
+ err = alg->clone_tfm(nhash, hash);
+ if (err)
+ goto out_free_nhash;
+
+ return nhash;
+
+out_free_nhash:
+ crypto_free_ahash(nhash);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
+ int err;
- if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
- alg->halg.statesize > HASH_MAX_STATESIZE ||
- alg->halg.statesize == 0)
+ if (alg->halg.statesize == 0)
return -EINVAL;
+ err = hash_prepare_alg(&alg->halg);
+ if (err)
+ return err;
+
base->cra_type = &crypto_ahash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index ab975a420e1e..186e762b509a 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -5,23 +5,20 @@
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <tadeusz.struk@intel.com>
*/
+#include <crypto/internal/akcipher.h>
+#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
#include <net/netlink.h>
-#include <crypto/akcipher.h>
-#include <crypto/internal/akcipher.h>
+
#include "internal.h"
-#ifdef CONFIG_NET
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_akcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -32,12 +29,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
-#else
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
@@ -76,6 +67,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
akcipher->free(akcipher);
}
+static int __maybe_unused crypto_akcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
+ struct crypto_istat_akcipher *istat;
+ struct crypto_stat_akcipher rakcipher;
+
+ istat = akcipher_get_stat(akcipher);
+
+ memset(&rakcipher, 0, sizeof(rakcipher));
+
+ strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
+ rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
+ rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(rakcipher), &rakcipher);
+}
+
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
@@ -83,7 +98,12 @@ static const struct crypto_type crypto_akcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
+#ifdef CONFIG_CRYPTO_USER
.report = crypto_akcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_akcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
@@ -108,11 +128,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d08f864f08be..d7eb8f9e9883 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -339,8 +339,6 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
list_add(&alg->cra_list, &crypto_alg_list);
- crypto_stats_init(alg);
-
if (larval) {
/* No cheating! */
alg->cra_flags &= ~CRYPTO_ALG_TESTED;
@@ -493,7 +491,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+ if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+ return;
+
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -1038,219 +1038,6 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg)
-{
- memset(&alg->stats, 0, sizeof(alg->stats));
-}
-EXPORT_SYMBOL_GPL(crypto_stats_init);
-
-void crypto_stats_get(struct crypto_alg *alg)
-{
- crypto_alg_get(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_get);
-
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.encrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
-
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
- int ret)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.err_cnt);
- } else {
- atomic64_inc(&alg->stats.aead.decrypt_cnt);
- atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
-
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
-
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
- atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
-
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.sign_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
-
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.err_cnt);
- else
- atomic64_inc(&alg->stats.akcipher.verify_cnt);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
-
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.compress_cnt);
- atomic64_add(slen, &alg->stats.compress.compress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_compress);
-
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.err_cnt);
- } else {
- atomic64_inc(&alg->stats.compress.decompress_cnt);
- atomic64_add(slen, &alg->stats.compress.decompress_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_decompress);
-
-void crypto_stats_ahash_update(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.hash.err_cnt);
- else
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
-
-void crypto_stats_ahash_final(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.hash.err_cnt);
- } else {
- atomic64_inc(&alg->stats.hash.hash_cnt);
- atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);