| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-10 14:04:16 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-10 14:04:16 -0700 |
| commit | 30066ce675d3af350bc5a53858991c0b518dda00 | |
| tree | 75db2274cd0887b11b4e297771287f0fb4c14b81 | |
| parent | 6763afe4b9f39142bda2a92d69e62fe85f67251c | |
| parent | c3afafa47898e34eb49828ec4ac92bcdc81c8f0c | |
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.9:
API:
- The crypto engine code now supports hashes.
Algorithms:
- Allow keys >= 2048 bits in FIPS mode for RSA.
Drivers:
- Memory overwrite fix for vmx ghash.
- Add support for building ARM sha1-neon in Thumb2 mode.
- Reenable ARM ghash-ce code by adding import/export.
- Reenable img-hash by adding import/export.
- Add support for multiple cores in omap-aes.
- Add little-endian support for sha1-powerpc.
- Add Cavium HWRNG driver for ThunderX SoC"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
crypto: caam - treat SGT address pointer as u64
crypto: ccp - Make syslog errors human-readable
crypto: ccp - clean up data structure
crypto: vmx - Ensure ghash-generic is enabled
crypto: testmgr - add guard to dst buffer for ahash_export
crypto: caam - Unmap region obtained by of_iomap
crypto: sha1-powerpc - little-endian support
crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
crypto: vmx - Fix memory corruption caused by p8_ghash
crypto: ghash-generic - move common definitions to a new header file
crypto: caam - fix sg dump
hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
crypto: omap-sham - shrink the internal buffer size
crypto: omap-sham - add support for export/import
crypto: omap-sham - convert driver logic to use sgs for data xmit
crypto: omap-sham - change the DMA threshold value to a define
crypto: omap-sham - add support functions for sg based data handling
crypto: omap-sham - rename sgl to sgl_tmp for deprecation
crypto: omap-sham - align algorithms on word offset
crypto: omap-sham - add context export/import stubs
...
77 files changed, 3852 insertions, 1654 deletions
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index fb2a1526f6ec..088b79c341ff 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -797,7 +797,8 @@ kernel crypto API            |       Caller
      include/linux/crypto.h and their definition can be seen below.
      The former function registers a single transformation, while
      the latter works on an array of transformation descriptions.
-     The latter is useful when registering transformations in bulk.
+     The latter is useful when registering transformations in bulk,
+     for example when a driver implements multiple transformations.
     </para>

     <programlisting>
@@ -822,18 +823,31 @@ kernel crypto API            |       Caller
     </para>

     <para>
-     The bulk registration / unregistration functions require
-     that struct crypto_alg is an array of count size. These
-     functions simply loop over that array and register /
-     unregister each individual algorithm. If an error occurs,
-     the loop is terminated at the offending algorithm definition.
-     That means, the algorithms prior to the offending algorithm
-     are successfully registered. Note, the caller has no way of
-     knowing which cipher implementations have successfully
-     registered. If this is important to know, the caller should
-     loop through the different implementations using the single
-     instance *_alg functions for each individual implementation.
+     The bulk registration/unregistration functions
+     register/unregister each transformation in the given array of
+     length count. They handle errors as follows:
     </para>
+    <itemizedlist>
+     <listitem>
+      <para>
+       crypto_register_algs() succeeds if and only if it
+       successfully registers all the given transformations. If an
+       error occurs partway through, then it rolls back successful
+       registrations before returning the error code. Note that if
+       a driver needs to handle registration errors for individual
+       transformations, then it will need to use the non-bulk
+       function crypto_register_alg() instead.
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       crypto_unregister_algs() tries to unregister all the given
+       transformations, continuing on error. It logs errors and
+       always returns zero.
+      </para>
+     </listitem>
+    </itemizedlist>
+
    </sect1>

    <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
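The rollback semantics documented above are easiest to see from the caller's side. Below is a minimal, hypothetical sketch, not part of this patch: a driver module that registers two transformations in bulk and lets the API unwind partial work on failure. The my_* names and the algorithm array contents are invented placeholders.

```c
#include <linux/module.h>
#include <linux/crypto.h>

/* Two fully filled-in struct crypto_alg definitions would go here. */
static struct crypto_alg my_algs[2] = {
	/* ... hypothetical algorithm definitions ... */
};

static int __init my_driver_init(void)
{
	/*
	 * All-or-nothing: on failure crypto_register_algs() unwinds
	 * any registrations it already made and returns the error.
	 */
	return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
}

static void __exit my_driver_exit(void)
{
	/* Continues on error and only logs; no return value to check. */
	crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
}

module_init(my_driver_init);
module_exit(my_driver_exit);
MODULE_LICENSE("GPL");
```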
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 1568cb5cd870..7546b3c02466 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -138,7 +138,7 @@ static struct shash_alg ghash_alg = {
 	.setkey			= ghash_setkey,
 	.descsize		= sizeof(struct ghash_desc_ctx),
 	.base			= {
-		.cra_name	= "ghash",
+		.cra_name	= "__ghash",
 		.cra_driver_name = "__driver-ghash-ce",
 		.cra_priority	= 0,
 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
 	}
 }

+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	return crypto_shash_export(desc, out);
+}
+
 static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 			      unsigned int keylen)
 {
@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
 	.final			= ghash_async_final,
 	.setkey			= ghash_async_setkey,
 	.digest			= ghash_async_digest,
+	.import			= ghash_async_import,
+	.export			= ghash_async_export,
 	.halg.digestsize	= GHASH_DIGEST_SIZE,
+	.halg.statesize		= sizeof(struct ghash_desc_ctx),
 	.halg.base		= {
 		.cra_name	= "ghash",
 		.cra_driver_name = "ghash-ce",
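The import/export hooks added here let callers snapshot a partial hash state and resume it later, and halg.statesize advertises how large that snapshot is; hashes lacking these hooks were the ones disabled and now re-enabled in this cycle. As a rough sketch of the underlying idea, here is the common pattern for a hypothetical synchronous hash whose entire partial state lives in its descriptor context; the my_* names are invented and this is not taken from the diff:

```c
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/hash.h>

/* Hypothetical partial state; .statesize would be sizeof(this). */
struct my_hash_ctx {
	u64 count;
	u8 buffer[64];
};

static int my_hash_export(struct shash_desc *desc, void *out)
{
	/* Snapshot the partial state so hashing can resume later. */
	memcpy(out, shash_desc_ctx(desc), sizeof(struct my_hash_ctx));
	return 0;
}

static int my_hash_import(struct shash_desc *desc, const void *in)
{
	/* Restore a previously exported partial state. */
	memcpy(shash_desc_ctx(desc), in, sizeof(struct my_hash_ctx));
	return 0;
}
```

The diff above implements the same contract for the async wrapper by delegating to crypto_shash_import()/crypto_shash_export() on the underlying cryptd child, and sets halg.statesize so callers can size their export buffer.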
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
index dcd01f3f0bb0..2468fade49cf 100644
--- a/arch/arm/crypto/sha1-armv7-neon.S
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>

 .syntax unified
-.code 32
 .fpu neon

 .text
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index 125e16520061..82ddc9bdfeb1 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -7,6 +7,15 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>

+#ifdef __BIG_ENDIAN__
+#define LWZ(rt, d, ra)	\
+	lwz	rt,d(ra)
+#else
+#define LWZ(rt, d, ra)	\
+	li	rt,d;		\
+	lwbrx	rt,rt,ra
+#endif
+
 /*
  * We roll the registers for T, A, B, C, D, E around on each
  * iteration; T on iteration t is A on iteration t+1, and so on.
@@ -23,7 +32,7 @@
 #define W(t)	(((t)%16)+16)

 #define LOADW(t)			\
-	lwz	W(t),(t)*4(r4)
+	LWZ(W(t),(t)*4,r4)

 #define STEPD0_LOAD(t)			\
 	andc	r0,RD(t),RB(t);		\
@@ -33,7 +42,7 @@
 	add	r0,RE(t),r15;		\
 	add	RT(t),RT(t),r6;		\
 	add	r14,r0,W(t);		\
-	lwz	W((t)+4),((t)+4)*4(r4);	\
+	LWZ(W((t)+4),((t)+4)*4,r4);	\
 	rotlwi	RB(t),RB(t),30;		\
 	add	RT(t),RT(t),r14
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 68a5ceaa04c8..2d8466f9e49b 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,37 @@ struct algif_hash_tfm {
 	bool has_key;
 };

+static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (ctx->result)
+		return 0;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result)
+		return -ENOMEM;
+
+	memset(ctx->result, 0, ds);
+
+	return 0;
+}
+
+static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (!ctx->result)
+		return;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	sock_kzfree_s(sk, ctx->result, ds);
+	ctx->result = NULL;
+}
+
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,

 	lock_sock(sk);
 	if (!ctx->more) {
+		if ((msg->msg_flags & MSG_MORE))
+			hash_free_result(sk, ctx);
+
 		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
 						&ctx->completion);
 		if (err)
@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,

 	ctx->more = msg->msg_flags & MSG_MORE;
 	if (!ctx->more) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+
 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,

 	sg_init_table(ctx->sgl.sg, 1);
 	sg_set_page(ctx->sgl.sg, page, size, offset);

+	if (!(flags & MSG_MORE)) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+	} else if (!ctx->more)
+		hash_free_result(sk, ctx);
+
 	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);

 	if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	bool result;
 	int err;

 	if (len > ds)
@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 		msg->msg_flags |= MSG_TRUNC;

 	lock_sock(sk);
+	result = ctx->result;
+	err = hash_alloc_result(sk, ctx);
+	if (err)
+		goto unlock;
+
+	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+
 	if (ctx->more) {
 		ctx->more = 0;
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
 		if (err)
 			goto unlock;
+	} else if (!result) {
+		err = af_alg_wait_for_completion(
+				crypto_ahash_digest(&ctx->req),
+				&ctx->completion);
 	}

 	err = memcpy_to_msg(msg, ctx->result, len);

+	hash_free_result(sk, ctx);
+
 unlock:
 	release_sock(sk);

@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;

-	sock_kzfree_s(sk, ctx->result,
-		      crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	hash_free_result(sk, ctx);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
 	struct algif_hash_tfm *tfm = private;
 	struct crypto_ahash *hash = tfm->hash;
 	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
-	unsigned ds = crypto_ahash_digestsize(hash);

 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;

-	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
-	if (!ctx->result) {
-		sock_kfree_s(sk, ctx, len);
-		return -ENOMEM;
-	}
-
-	memset(ctx->result, 0, ds);
-
+	ctx->result = NULL;
 	ctx->len = len;
 	ctx->more = 0;
 	af_alg_init_completion(&ctx->completion);
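The lazily allocated result buffer above backs the digest that userspace reads out of an algif_hash socket: incremental sends with MSG_MORE feed the hash, and the final read returns the result. A minimal, hypothetical userspace sketch of that flow over the AF_ALG interface (error handling omitted for brevity):

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	unsigned char digest[20];	/* SHA-1 digest size */
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	/* Two partial updates, then read back the finalized digest. */
	send(opfd, "hello ", 6, MSG_MORE);
	send(opfd, "world", 5, 0);
	read(opfd, digest, sizeof(digest));

	for (size_t i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
```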
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index c1229614c7e3..8e94e29dc6fc 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -107,10 +107,7 @@ static struct shash_alg alg = {

 static int __init crct10dif_mod_init(void)
 {
-	int ret;
-
-	ret = crypto_register_shash(&alg);
-	return ret;
+	return crypto_register_shash(&alg);
 }

 static void __exit crct10dif_mod_fini(void)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index a55c82dd48ef..bfb92ace2c91 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -14,13 +14,12 @@
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
 #include "internal.h"

 #define CRYPTO_ENGINE_MAX_QLEN 10

-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err);
-
 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct ablkcipher_request *req;
+	struct ahash_request *hreq;
+	struct ablkcipher_request *breq;
 	unsigned long flags;
 	bool was_busy = false;
-	int ret;
+	int ret, rtype;

 	spin_lock_irqsave(&engine->queue_lock, flags);

@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!async_req)
 		goto out;

-	req = ablkcipher_request_cast(async_req);
-
-	engine->cur_req = req;
+	engine->cur_req = async_req;
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);

@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,

 	spin_unlock_irqrestore(&engine->queue_lock, flags);

+	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
 	/* Until here we get the request need to be encrypted successfully */
 	if (!was_busy && engine->prepare_crypt_hardware) {
 		ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 	}

-	if (engine->prepare_request) {
-		ret = engine->prepare_request(engine, engine->cur_req);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		if (engine->prepare_hash_request) {
+			ret = engine->prepare_hash_request(engine, hreq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->hash_one_request(engine, hreq);
 		if (ret) {
-			pr_err("failed to prepare request: %d\n", ret);
+			pr_err("failed to hash one request from queue\n");
 			goto req_err;
 		}
-		engine->cur_req_prepared = true;
-	}
-
-	ret = engine->crypt_one_request(engine, engine->cur_req);
-	if (ret) {
-		pr_err("failed to crypt one request from queue\n");
-		goto req_err;
+		return;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		if (engine->prepare_cipher_request) {
+			ret = engine->prepare_cipher_request(engine, breq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->cipher_one_request(engine, breq);
+		if (ret) {
+			pr_err("failed to cipher one request from queue\n");
+			goto req_err;
+		}
+		return;
+	default:
+		pr_err("failed to p
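The dispatch above expects drivers to populate the per-type engine callbacks named in this diff (prepare_hash_request and hash_one_request, alongside their cipher counterparts). A rough, hypothetical sketch of a driver wiring up the hash path; the my_* names are invented, and completion is assumed to be reported through crypto_finalize_hash_request() as introduced by this series:

```c
#include <crypto/engine.h>
#include <crypto/internal/hash.h>

static int my_prepare_hash_request(struct crypto_engine *engine,
				   struct ahash_request *req)
{
	/* e.g. map DMA buffers and program the queue for this request. */
	return 0;
}

static int my_hash_one_request(struct crypto_engine *engine,
			       struct ahash_request *req)
{
	/*
	 * Kick the hardware here. When the operation completes (for
	 * example in an IRQ handler), the driver is expected to call
	 * crypto_finalize_hash_request(engine, req, err).
	 */
	return 0;
}

static int my_engine_setup(struct device *dev, struct crypto_engine **out)
{
	/* Run the pump in a realtime kthread (second argument). */
	struct crypto_engine *engine = crypto_engine_alloc_init(dev, true);

	if (!engine)
		return -ENOMEM;

	engine->prepare_hash_request = my_prepare_hash_request;
	engine->hash_one_request = my_hash_one_request;
	*out = engine;

	return crypto_engine_start(engine);
}
```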
