|  |  |  |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-02 17:45:14 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-02 17:45:14 -0700 |
| commit | c2a24a7a036b3bd3a2e6c66730dfc777cae6540a | |
| tree | 659b1c18156bd402d85514a724c47adbc6de0f0d /crypto | |
| parent | a0b09f2d6f30723e1008bd9ddb504e302e329f81 | |
| parent | af5d35b83f642399c719ea9a8599a13b8a0c4167 | |
Merge tag 'v5.20-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Make proc files report fips module name and version
Algorithms:
- Move generic SHA1 code into lib/crypto
- Implement Chinese Remainder Theorem for RSA
- Remove blake2s
- Add XCTR with x86/arm64 acceleration
- Add POLYVAL with x86/arm64 acceleration
- Add HCTR2
- Add ARIA
Drivers:
- Add support for new CCP/PSP device ID in ccp"
* tag 'v5.20-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (89 commits)
crypto: tcrypt - Remove the static variable initialisations to NULL
crypto: arm64/poly1305 - fix a read out-of-bound
crypto: hisilicon/zip - Use the bitmap API to allocate bitmaps
crypto: hisilicon/sec - fix auth key size error
crypto: ccree - Remove a useless dma_supported() call
crypto: ccp - Add support for new CCP/PSP device ID
crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of
crypto: hisilicon/hpre - don't use GFP_KERNEL to alloc mem during softirq
crypto: testmgr - some more fixes to RSA test vectors
cyrpto: powerpc/aes - delete the rebundant word "block" in comments
hwrng: via - Fix comment typo
crypto: twofish - Fix comment typo
crypto: rmd160 - fix Kconfig "its" grammar
crypto: keembay-ocs-ecc - Drop if with an always false condition
Documentation: qat: rewrite description
Documentation: qat: Use code block for qat sysfs example
crypto: lib - add module license to libsha1
crypto: lib - make the sha1 library optional
crypto: lib - move lib/sha1.c into lib/crypto/
crypto: fips - make proc files report fips module name and version
...
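The algorithms added by this pull register under the usual crypto API names, so their presence can be probed by name once the kernel is built with the new options. Below is a minimal in-kernel sketch (illustrative only, not part of this merge; the module and function names are hypothetical). "hctr2(aes)", "xctr(aes)" and "aria" are the names the new code registers or instantiates via templates:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/module.h>

static int __init probe_new_algs_init(void)
{
	/* Template instantiations are resolved (and self-tested) on lookup. */
	pr_info("hctr2(aes): %s\n",
		crypto_has_skcipher("hctr2(aes)", 0, 0) ? "available" : "missing");
	pr_info("xctr(aes): %s\n",
		crypto_has_skcipher("xctr(aes)", 0, 0) ? "available" : "missing");
	pr_info("aria: %s\n",
		crypto_has_alg("aria", CRYPTO_ALG_TYPE_CIPHER,
			       CRYPTO_ALG_TYPE_MASK) ? "available" : "missing");
	return 0;
}
module_init(probe_new_algs_init);
MODULE_LICENSE("GPL");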
Diffstat (limited to 'crypto')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | crypto/Kconfig | 98 |
| -rw-r--r-- | crypto/Makefile | 5 |
| -rw-r--r-- | crypto/aria.c | 288 |
| -rw-r--r-- | crypto/blake2s_generic.c | 75 |
| -rw-r--r-- | crypto/fips.c | 35 |
| -rw-r--r-- | crypto/hctr2.c | 581 |
| -rw-r--r-- | crypto/polyval-generic.c | 245 |
| -rw-r--r-- | crypto/rsa.c | 78 |
| -rw-r--r-- | crypto/tcrypt.c | 62 |
| -rw-r--r-- | crypto/testmgr.c | 75 |
| -rw-r--r-- | crypto/testmgr.h | 4830 |
| -rw-r--r-- | crypto/twofish_common.c | 2 |
| -rw-r--r-- | crypto/xctr.c | 191 |
13 files changed, 6145 insertions, 420 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 7b81685b5655..bb427a835e44 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -33,6 +33,27 @@ config CRYPTO_FIPS
 	  certification.  You should say no unless you know what
 	  this is.
 
+config CRYPTO_FIPS_NAME
+	string "FIPS Module Name"
+	default "Linux Kernel Cryptographic API"
+	depends on CRYPTO_FIPS
+	help
+	  This option sets the FIPS Module name reported by the Crypto API via
+	  the /proc/sys/crypto/fips_name file.
+
+config CRYPTO_FIPS_CUSTOM_VERSION
+	bool "Use Custom FIPS Module Version"
+	depends on CRYPTO_FIPS
+	default n
+
+config CRYPTO_FIPS_VERSION
+	string "FIPS Module Version"
+	default "(none)"
+	depends on CRYPTO_FIPS_CUSTOM_VERSION
+	help
+	  This option provides the ability to override the FIPS Module Version.
+	  By default the KERNELRELEASE value is used.
+
 config CRYPTO_ALGAPI
 	tristate
 	select CRYPTO_ALGAPI2
@@ -461,6 +482,15 @@ config CRYPTO_PCBC
 	  PCBC: Propagating Cipher Block Chaining mode
 	  This block cipher algorithm is required for RxRPC.
 
+config CRYPTO_XCTR
+	tristate
+	select CRYPTO_SKCIPHER
+	select CRYPTO_MANAGER
+	help
+	  XCTR: XOR Counter mode. This blockcipher mode is a variant of CTR mode
+	  using XORs and little-endian addition rather than big-endian arithmetic.
+	  XCTR mode is used to implement HCTR2.
+
 config CRYPTO_XTS
 	tristate "XTS support"
 	select CRYPTO_SKCIPHER
@@ -524,6 +554,17 @@ config CRYPTO_ADIANTUM
 
 	  If unsure, say N.
 
+config CRYPTO_HCTR2
+	tristate "HCTR2 support"
+	select CRYPTO_XCTR
+	select CRYPTO_POLYVAL
+	select CRYPTO_MANAGER
+	help
+	  HCTR2 is a length-preserving encryption mode for storage encryption that
+	  is efficient on processors with instructions to accelerate AES and
+	  carryless multiplication, e.g. x86 processors with AES-NI and CLMUL, and
+	  ARM processors with the ARMv8 crypto extensions.
+
 config CRYPTO_ESSIV
 	tristate "ESSIV support for block encryption"
 	select CRYPTO_AUTHENC
@@ -704,26 +745,8 @@ config CRYPTO_BLAKE2B
 
 	  See https://blake2.net for further information.
 
-config CRYPTO_BLAKE2S
-	tristate "BLAKE2s digest algorithm"
-	select CRYPTO_LIB_BLAKE2S_GENERIC
-	select CRYPTO_HASH
-	help
-	  Implementation of cryptographic hash function BLAKE2s
-	  optimized for 8-32bit platforms and can produce digests of any size
-	  between 1 to 32.  The keyed hash is also implemented.
-
-	  This module provides the following algorithms:
-
-	  - blake2s-128
-	  - blake2s-160
-	  - blake2s-224
-	  - blake2s-256
-
-	  See https://blake2.net for further information.
-
 config CRYPTO_BLAKE2S_X86
-	tristate "BLAKE2s digest algorithm (x86 accelerated version)"
+	bool "BLAKE2s digest algorithm (x86 accelerated version)"
 	depends on X86 && 64BIT
 	select CRYPTO_LIB_BLAKE2S_GENERIC
 	select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
@@ -777,6 +800,23 @@ config CRYPTO_GHASH
 	  GHASH is the hash function used in GCM (Galois/Counter Mode).
 	  It is not a general-purpose cryptographic hash function.
 
+config CRYPTO_POLYVAL
+	tristate
+	select CRYPTO_GF128MUL
+	select CRYPTO_HASH
+	help
+	  POLYVAL is the hash function used in HCTR2.  It is not a general-purpose
+	  cryptographic hash function.
+
+config CRYPTO_POLYVAL_CLMUL_NI
+	tristate "POLYVAL hash function (CLMUL-NI accelerated)"
+	depends on X86 && 64BIT
+	select CRYPTO_POLYVAL
+	help
+	  This is the x86_64 CLMUL-NI accelerated implementation of POLYVAL. It is
+	  used to efficiently implement HCTR2 on x86-64 processors that support
+	  carry-less multiplication instructions.
+
 config CRYPTO_POLY1305
 	tristate "Poly1305 authenticator algorithm"
 	select CRYPTO_HASH
@@ -861,7 +901,7 @@ config CRYPTO_RMD160
 	  RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
 	  to be used as a secure replacement for the 128-bit hash functions
-	  MD4, MD5 and it's predecessor RIPEMD
+	  MD4, MD5 and its predecessor RIPEMD
 	  (not to be confused with RIPEMD-128).
 
 	  It's speed is comparable to SHA1 and there are no known attacks
@@ -873,6 +913,7 @@ config CRYPTO_RMD160
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
 	select CRYPTO_HASH
+	select CRYPTO_LIB_SHA1
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
@@ -1214,7 +1255,7 @@ config CRYPTO_AES_NI_INTEL
 	  In addition to AES cipher algorithm support, the acceleration
 	  for some popular block cipher mode is supported too, including
 	  ECB, CBC, LRW, XTS. The 64 bit version has additional
-	  acceleration for CTR.
+	  acceleration for CTR and XCTR.
 
 config CRYPTO_AES_SPARC64
 	tristate "AES cipher algorithms (SPARC64)"
@@ -1603,6 +1644,21 @@ config CRYPTO_SEED
 	  See also:
 	  <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp>
 
+config CRYPTO_ARIA
+	tristate "ARIA cipher algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  ARIA cipher algorithm (RFC5794).
+
+	  ARIA is a standard encryption algorithm of the Republic of Korea.
+	  The ARIA specifies three key sizes and rounds.
+	  128-bit: 12 rounds.
+	  192-bit: 14 rounds.
+	  256-bit: 16 rounds.
+
+	  See also:
+	  <https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do>
+
 config CRYPTO_SERPENT
 	tristate "Serpent cipher algorithm"
 	select CRYPTO_ALGAPI
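For orientation, a configuration fragment exercising the new options might look as follows. This is an illustrative sketch in the Kconfig idiom above, not part of the merge; the name and version strings are placeholders, and CRYPTO_XCTR/CRYPTO_POLYVAL have no prompt of their own because CRYPTO_HCTR2 selects them:

# Illustrative .config fragment (string values are placeholders)
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_FIPS_NAME="Example FIPS Module"
CONFIG_CRYPTO_FIPS_CUSTOM_VERSION=y
CONFIG_CRYPTO_FIPS_VERSION="1.0.0"
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_ARIA=m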
diff --git a/crypto/Makefile b/crypto/Makefile
index ceaaa9f34145..167c004dbf4f 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -84,7 +84,6 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
-obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
 obj-$(CONFIG_CRYPTO_CBC) += cbc.o
@@ -94,6 +93,8 @@ obj-$(CONFIG_CRYPTO_CTS) += cts.o
 obj-$(CONFIG_CRYPTO_LRW) += lrw.o
 obj-$(CONFIG_CRYPTO_XTS) += xts.o
 obj-$(CONFIG_CRYPTO_CTR) += ctr.o
+obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
+obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
 obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
 obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
 obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
@@ -147,6 +148,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
 obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
 obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
+obj-$(CONFIG_CRYPTO_ARIA) += aria.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
@@ -171,6 +173,7 @@ UBSAN_SANITIZE_jitterentropy.o = n
 jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
+obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
diff --git a/crypto/aria.c b/crypto/aria.c
new file mode 100644
index 000000000000..ac3dffac34bb
--- /dev/null
+++ b/crypto/aria.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API.
+ *
+ * ARIA Cipher Algorithm.
+ *
+ * Documentation of ARIA can be found in RFC 5794.
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ *
+ * Information for ARIA
+ *     http://210.104.33.10/ARIA/index-e.html (English)
+ *     http://seed.kisa.or.kr/ (Korean)
+ *
+ * Public domain version is distributed above.
+ */
+
+#include <crypto/aria.h>
+
+static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
+				 unsigned int key_len)
+{
+	const __be32 *key = (const __be32 *)in_key;
+	u32 w0[4], w1[4], w2[4], w3[4];
+	u32 reg0, reg1, reg2, reg3;
+	const u32 *ck;
+	int rkidx = 0;
+
+	ck = &key_rc[(key_len - 16) / 8][0];
+
+	w0[0] = be32_to_cpu(key[0]);
+	w0[1] = be32_to_cpu(key[1]);
+	w0[2] = be32_to_cpu(key[2]);
+	w0[3] = be32_to_cpu(key[3]);
+
+	reg0 = w0[0] ^ ck[0];
+	reg1 = w0[1] ^ ck[1];
+	reg2 = w0[2] ^ ck[2];
+	reg3 = w0[3] ^ ck[3];
+
+	aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
+
+	if (key_len > 16) {
+		w1[0] = be32_to_cpu(key[4]);
+		w1[1] = be32_to_cpu(key[5]);
+		if (key_len > 24) {
+			w1[2] = be32_to_cpu(key[6]);
+			w1[3] = be32_to_cpu(key[7]);
+		} else {
+			w1[2] = 0;
+			w1[3] = 0;
+		}
+	} else {
+		w1[0] = 0;
+		w1[1] = 0;
+		w1[2] = 0;
+		w1[3] = 0;
+	}
+
+	w1[0] ^= reg0;
+	w1[1] ^= reg1;
+	w1[2] ^= reg2;
+	w1[3] ^= reg3;
+
+	reg0 = w1[0];
+	reg1 = w1[1];
+	reg2 = w1[2];
+	reg3 = w1[3];
+
+	reg0 ^= ck[4];
+	reg1 ^= ck[5];
+	reg2 ^= ck[6];
+	reg3 ^= ck[7];
+
+	aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
+
+	reg0 ^= w0[0];
+	reg1 ^= w0[1];
+	reg2 ^= w0[2];
+	reg3 ^= w0[3];
+
+	w2[0] = reg0;
+	w2[1] = reg1;
+	w2[2] = reg2;
+	w2[3] = reg3;
+
+	reg0 ^= ck[8];
+	reg1 ^= ck[9];
+	reg2 ^= ck[10];
+	reg3 ^= ck[11];
+
+	aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
+
+	w3[0] = reg0 ^ w1[0];
+	w3[1] = reg1 ^ w1[1];
+	w3[2] = reg2 ^ w1[2];
+	w3[3] = reg3 ^ w1[3];
+
+	aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19);
+
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31);
+
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67);
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67);
+
+	rkidx++;
+	aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97);
+	if (key_len > 16) {
+		rkidx++;
+		aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97);
+		rkidx++;
+		aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97);
+
+		if (key_len > 24) {
+			rkidx++;
+			aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97);
+
+			rkidx++;
+			aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109);
+		}
+	}
+}
+
+static void aria_set_decrypt_key(struct aria_ctx *ctx)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i];
+		ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i];
+	}
+
+	for (i = 1; i < ctx->rounds; i++) {
+		ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]);
+		ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]);
+		ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]);
+		ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]);
+
+		aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
+			       &ctx->dec_key[i][2], &ctx->dec_key[i][3]);
+		aria_diff_byte(&ctx->dec_key[i][1],
+			       &ctx->dec_key[i][2], &ctx->dec_key[i][3]);
+		aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
+			       &ctx->dec_key[i][2], &ctx->dec_key[i][3]);
+	}
+}
+
+static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			unsigned int key_len)
+{
+	struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (key_len != 16 && key_len != 24 && key_len != 32)
+		return -EINVAL;
+
+	ctx->key_length = key_len;
+	ctx->rounds = (key_len + 32) / 4;
+
+	aria_set_encrypt_key(ctx, in_key, key_len);
+	aria_set_decrypt_key(ctx);
+
+	return 0;
+}
+
+static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
+			 u32 key[][ARIA_RD_KEY_WORDS])
+{
+	const __be32 *src = (const __be32 *)in;
+	__be32 *dst = (__be32 *)out;
+	u32 reg0, reg1, reg2, reg3;
+	int rounds, rkidx = 0;
+
+	rounds = ctx->rounds;
+
+	reg0 = be32_to_cpu(src[0]);
+	reg1 = be32_to_cpu(src[1]);
+	reg2 = be32_to_cpu(src[2]);
+	reg3 = be32_to_cpu(src[3]);
+
+	aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
+	rkidx++;
+
+	aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
+	aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
+	rkidx++;
+
+	while ((rounds -= 2) > 0) {
+		aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
+		aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
+		rkidx++;
+
+		aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
+		aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
+		rkidx++;
+	}
+
+	reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]),
+					(u8)(x2[get_u8(reg0, 1)] >> 8),
+					(u8)(s1[get_u8(reg0, 2)]),
+					(u8)(s2[get_u8(reg0, 3)]));
+	reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]),
+					(u8)(x2[get_u8(reg1, 1)] >> 8),
+					(u8)(s1[get_u8(reg1, 2)]),
+					(u8)(s2[get_u8(reg1, 3)]));
+	reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]),
+					(u8)(x2[get_u8(reg2, 1)] >> 8),
+					(u8)(s1[get_u8(reg2, 2)]),
+					(u8)(s2[get_u8(reg2, 3)]));
+	reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]),
+					(u8)(x2[get_u8(reg3, 1)] >> 8),
+					(u8)(s1[get_u8(reg3, 2)]),
+					(u8)(s2[get_u8(reg3, 3)]));
+
+	dst[0] = cpu_to_be32(reg0);
+	dst[1] = cpu_to_be32(reg1);
+	dst[2] = cpu_to_be32(reg2);
+	dst[3] = cpu_to_be32(reg3);
+}
+
+static void aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	__aria_crypt(ctx, out, in, ctx->enc_key);
+}
+
+static void aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	__aria_crypt(ctx, out, in, ctx->dec_key);
+}
+
+static struct crypto_alg aria_alg = {
+	.cra_name		= "aria",
+	.cra_driver_name	= "aria-generic",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		= ARIA_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aria_ctx),
+	.cra_alignmask		= 3,
+	.cra_module		= THIS_MODULE,
+	.cra_u			= {
+		.cipher = {
+			.cia_min_keysize	= ARIA_MIN_KEY_SIZE,
+			.cia_max_keysize	= ARIA_MAX_KEY_SIZE,
+			.cia_setkey		= aria_set_key,
+			.cia_encrypt		= aria_encrypt,
+			.cia_decrypt		= aria_decrypt
+		}
+	}
+};
+
+static int __init aria_init(void)
+{
+	return crypto_register_alg(&aria_alg);
+}
+
+static void __exit aria_fini(void)
+{
+	crypto_unregister_alg(&aria_alg);
+}
+
+subsys_initcall(aria_init);
+module_exit(aria_fini);
+
+MODULE_DESCRIPTION("ARIA Cipher Algorithm");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
+MODULE_ALIAS_CRYPTO("aria");
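For reference, a minimal sketch of driving the new "aria" cipher from another in-tree module, mirroring the single-block cipher usage seen in hctr2.c below. This is illustrative only and not part of the merge; note that the single-block cipher API lives in the CRYPTO_INTERNAL namespace, and a 32-byte key yields (32 + 32) / 4 = 16 rounds per aria_set_key() above:

#include <crypto/aria.h>
#include <crypto/internal/cipher.h>
#include <linux/err.h>
#include <linux/module.h>

MODULE_IMPORT_NS(CRYPTO_INTERNAL);

static int __init aria_demo_init(void)
{
	u8 key[32] = { 0 };	/* 256-bit key => 16 rounds */
	u8 in[ARIA_BLOCK_SIZE] = { 0 };
	u8 out[ARIA_BLOCK_SIZE];
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aria", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one 16-byte block */

	crypto_free_cipher(tfm);
	return err;
}
module_init(aria_demo_init);
MODULE_LICENSE("GPL");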
diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
deleted file mode 100644
index 5f96a21f8788..000000000000
--- a/crypto/blake2s_generic.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * shash interface to the generic implementation of BLAKE2s
- *
- * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- */
-
-#include <crypto/internal/blake2s.h>
-#include <crypto/internal/hash.h>
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-static int crypto_blake2s_update_generic(struct shash_desc *desc,
-					 const u8 *in, unsigned int inlen)
-{
-	return crypto_blake2s_update(desc, in, inlen, true);
-}
-
-static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
-{
-	return crypto_blake2s_final(desc, out, true);
-}
-
-#define BLAKE2S_ALG(name, driver_name, digest_size)			\
-	{								\
-		.base.cra_name		= name,				\
-		.base.cra_driver_name	= driver_name,			\
-		.base.cra_priority	= 100,				\
-		.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,	\
-		.base.cra_blocksize	= BLAKE2S_BLOCK_SIZE,		\
-		.base.cra_ctxsize	= sizeof(struct blake2s_tfm_ctx), \
-		.base.cra_module	= THIS_MODULE,			\
-		.digestsize		= digest_size,			\
-		.setkey			= crypto_blake2s_setkey,	\
-		.init			= crypto_blake2s_init,		\
-		.update			= crypto_blake2s_update_generic, \
-		.final			= crypto_blake2s_final_generic,	\
-		.descsize		= sizeof(struct blake2s_state),	\
-	}
-
-static struct shash_alg blake2s_algs[] = {
-	BLAKE2S_ALG("blake2s-128", "blake2s-128-generic",
-		    BLAKE2S_128_HASH_SIZE),
-	BLAKE2S_ALG("blake2s-160", "blake2s-160-generic",
-		    BLAKE2S_160_HASH_SIZE),
-	BLAKE2S_ALG("blake2s-224", "blake2s-224-generic",
-		    BLAKE2S_224_HASH_SIZE),
-	BLAKE2S_ALG("blake2s-256", "blake2s-256-generic",
-		    BLAKE2S_256_HASH_SIZE),
-};
-
-static int __init blake2s_mod_init(void)
-{
-	return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
-}
-
-static void __exit blake2s_mod_exit(void)
-{
-	crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
-}
-
-subsys_initcall(blake2s_mod_init);
-module_exit(blake2s_mod_exit);
-
-MODULE_ALIAS_CRYPTO("blake2s-128");
-MODULE_ALIAS_CRYPTO("blake2s-128-generic");
-MODULE_ALIAS_CRYPTO("blake2s-160");
-MODULE_ALIAS_CRYPTO("blake2s-160-generic");
-MODULE_ALIAS_CRYPTO("blake2s-224");
-MODULE_ALIAS_CRYPTO("blake2s-224-generic");
-MODULE_ALIAS_CRYPTO("blake2s-256");
-MODULE_ALIAS_CRYPTO("blake2s-256-generic");
-MODULE_LICENSE("GPL v2");
diff --git a/crypto/fips.c b/crypto/fips.c
index 7b1d8caee669..b05d3c7b3ca5 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/sysctl.h>
 #include <linux/notifier.h>
+#include <generated/utsrelease.h>
 
 int fips_enabled;
 EXPORT_SYMBOL_GPL(fips_enabled);
@@ -30,13 +31,37 @@ static int fips_enable(char *str)
 
 __setup("fips=", fips_enable);
 
+#define FIPS_MODULE_NAME CONFIG_CRYPTO_FIPS_NAME
+#ifdef CONFIG_CRYPTO_FIPS_CUSTOM_VERSION
+#define FIPS_MODULE_VERSION CONFIG_CRYPTO_FIPS_VERSION
+#else
+#define FIPS_MODULE_VERSION UTS_RELEASE
+#endif
+
+static char fips_name[] = FIPS_MODULE_NAME;
+static char fips_version[] = FIPS_MODULE_VERSION;
+
 static struct ctl_table crypto_sysctl_table[] = {
 	{
-		.procname	= "fips_enabled",
-		.data		= &fips_enabled,
-		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec
+		.procname	= "fips_enabled",
+		.data		= &fips_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec
+	},
+	{
+		.procname	= "fips_name",
+		.data		= &fips_name,
+		.maxlen		= 64,
+		.mode		= 0444,
+		.proc_handler	= proc_dostring
+	},
+	{
+		.procname	= "fips_version",
+		.data		= &fips_version,
+		.maxlen		= 64,
+		.mode		= 0444,
+		.proc_handler	= proc_dostring
 	},
 	{}
 };
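The two new entries in crypto_sysctl_table surface as /proc/sys/crypto/fips_name and /proc/sys/crypto/fips_version (the fips_name path is also documented in the Kconfig help above). A small userspace C sketch reading them, for illustration only:

#include <stdio.h>

static void print_sysctl(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* sysctl value includes its newline */
	if (f)
		fclose(f);
}

int main(void)
{
	print_sysctl("/proc/sys/crypto/fips_enabled");
	print_sysctl("/proc/sys/crypto/fips_name");	/* CONFIG_CRYPTO_FIPS_NAME */
	print_sysctl("/proc/sys/crypto/fips_version");	/* UTS_RELEASE unless overridden */
	return 0;
}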
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
new file mode 100644
index 000000000000..7d00a3bcb667
--- /dev/null
+++ b/crypto/hctr2.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HCTR2 length-preserving encryption mode
+ *
+ * Copyright 2021 Google LLC
+ */
+
+
+/*
+ * HCTR2 is a length-preserving encryption mode that is efficient on
+ * processors with instructions to accelerate AES and carryless
+ * multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM
+ * processors with the ARMv8 crypto extensions.
+ *
+ * For more details, see the paper: "Length-preserving encryption with HCTR2"
+ * (https://eprint.iacr.org/2021/1441.pdf)
+ */
+
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/polyval.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+
+#define BLOCKCIPHER_BLOCK_SIZE		16
+
+/*
+ * The specification allows variable-length tweaks, but Linux's crypto API
+ * currently only allows algorithms to support a single length.  The "natural"
+ * tweak length for HCTR2 is 16, since that fits into one POLYVAL block for
+ * the best performance.  But longer tweaks are useful for fscrypt, to avoid
+ * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
+ */
+#define TWEAK_SIZE		32
+
+struct hctr2_instance_ctx {
+	struct crypto_cipher_spawn blockcipher_spawn;
+	struct crypto_skcipher_spawn xctr_spawn;
+	struct crypto_shash_spawn polyval_spawn;
+};
+
+struct hctr2_tfm_ctx {
+	struct crypto_cipher *blockcipher;
+	struct crypto_skcipher *xctr;
+	struct crypto_shash *polyval;
+	u8 L[BLOCKCIPHER_BLOCK_SIZE];
+	int hashed_tweak_offset;
+	/*
+	 * This struct is allocated with extra space for two exported hash
+	 * states.  Since the hash state size is not known at compile-time, we
+	 * can't add these to the struct directly.
+	 *
+	 * hashed_tweaklen_divisible;
+	 * hashed_tweaklen_remainder;
+	 */
+};
+
+struct hctr2_request_ctx {
+	u8 first_block[BLOCKCIPHER_BLOCK_SIZE];
+	u8 xctr_iv[BLOCKCIPHER_BLOCK_SIZE];
+	struct scatterlist *bulk_part_dst;
+	struct scatterlist *bulk_part_src;
+	struct scatterlist sg_src[2];
+	struct scatterlist sg_dst[2];
+	/*
+	 * Sub-request sizes are unknown at compile-time, so they need to go
+	 * after the members with known sizes.
+	 */
+	union {
+		struct shash_desc hash_desc;
+		struct skcipher_request xctr_req;
+	} u;
+	/*
+	 * This struct is allocated with extra space for one exported hash
+	 * state.  Since the hash state size is not known at compile-time, we
+	 * can't add it to the struct directly.
+	 *
+	 * hashed_tweak;
+	 */
+};
+
+static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx,
+					bool has_remainder)
+{
+	u8 *p = (u8 *)tctx + sizeof(*tctx);
+
+	if (has_remainder) /* For messages not a multiple of block length */
+		p += crypto_shash_statesize(tctx->polyval);
+	return p;
+}
+
+static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
+				     struct hctr2_request_ctx *rctx)
+{
+	return (u8 *)rctx + tctx->hashed_tweak_offset;
+}
+
+/*
+ * The input data for each HCTR2 hash step begins with a 16-byte block that
+ * contains the tweak length and a flag that indicates whether the input is
+ * evenly divisible into blocks.  Since this implementation only supports one
+ * tweak length, we precompute the two hash states resulting from hashing the
+ * two possible values of this initial block.  This reduces by one block the
+ * amount of data that needs to be hashed for each encryption/decryption.
+ *
+ * These precomputed hashes are stored in hctr2_tfm_ctx.
+ */
+static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder)
+{
+	SHASH_DESC_ON_STACK(shash, tfm->polyval);
+	__le64 tweak_length_block[2];
+	int err;
+
+	shash->tfm = tctx->polyval;
+	memset(tweak_length_block, 0, sizeof(tweak_length_block));
+
+	tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder);
+	err = crypto_shash_init(shash);
+	if (err)
+		return err;
+	err = crypto_shash_update(shash, (u8 *)tweak_length_block,
+				  POLYVAL_BLOCK_SIZE);
+	if (err)
+		return err;
+	return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder));
+}
+
+static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+	u8 hbar[BLOCKCIPHER_BLOCK_SIZE];
+	int err;
+
+	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(tctx->blockcipher,
+				crypto_skcipher_get_flags(tfm) &
+				CRYPTO_TFM_REQ_MASK);
+	err = crypto_cipher_setkey(tctx->blockcipher, key, keylen);
+	if (err)
+		return err;
+
+	crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(tctx->xctr,
+				  crypto_skcipher_get_flags(tfm) &
+				  CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(tctx->xctr, key, keylen);
+	if (err)
+		return err;
+
+	memset(hbar, 0, sizeof(hbar));
+	crypto_cipher_encrypt_one(tctx->blockcipher, hbar, hbar);
+
+	memset(tctx->L, 0, sizeof(tctx->L));
+	tctx->L[0] = 0x01;
+	crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L);
+
+	crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK);
+	crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) &
+			       CRYPTO_TFM_REQ_MASK);
+	err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE);
+	if (err)
+		return err;
+	memzero_explicit(hbar, sizeof(hbar));
+
+	return hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false);
+}
+
+static int hctr2_hash_tweak(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+	struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
+	struct shash_desc *hash_desc = &rctx->u.hash_desc;
+	int err;
+	bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE;
+
+	hash_desc->tfm = tctx->polyval;
+	err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder));
+	if (err)
+		return err;
+	err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE);
+	if (err)
+		return err;
+
+	// Store the hashed tweak, since we need it when computing both
+	// H(T || N) and H(T || V).
+	return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx));
+}
+
+static int hctr2_hash_message(struct skcipher_request *req,
+			      struct scatterlist *sgl,
+			      u8 digest[POLYVAL_DIGEST_SIZE])
+{
+	static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 };
+	struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
+	struct shash_desc *hash_desc = &rctx->u.hash_desc;
+	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	struct sg_mapping_iter miter;
+	unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE;
+	int i;
+	int err = 0;
+	int n = 0;
+
+	sg_miter_start(&miter, sgl, sg_nents(sgl),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	for (i = 0; i < bulk_len; i += n) {
+		sg_miter_next(&miter);
+		n = min_t(unsigned int, miter.length, bulk_len - i);
+		err = crypto_shash_update(hash_desc, miter.addr, n);
+		if (err)
+			break;
+	}
+	sg_miter_stop(&miter);
+
+	if (err)
+		return err;
+
+	if (remainder) {
+		err = crypto_shash_update(hash_desc, padding,
+					  BLOCKCIPHER_BLOCK_SIZE - remainder);
+		if (err)
+			return err;
+	}
+	return crypto_shash_final(hash_desc, digest);
+}
+
+static int hctr2_finish(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+	struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
+	u8 digest[POLYVAL_DIGEST_SIZE];
+	struct shash_desc *hash_desc = &rctx->u.hash_desc;
+	int err;
+
+	// U = UU ^ H(T || V)
+	// or M = MM ^ H(T || N)
+	hash_desc->tfm = tctx->polyval;
+	err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx));
+	if (err)
+		return err;
+	err = hctr2_hash_message(req, rctx->bulk_part_dst, digest);
+	if (err)
+		return err;
+	crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE);
+
+	// Copy U (or M) into dst scatterlist
+	scatterwalk_map_and_copy(rctx->first_block, req->dst,
+				 0, BLOCKCIPHER_BLOCK_SIZE, 1);
+	return 0;
+}
+
+static void hctr2_xctr_done(struct crypto_async_request *areq,
+			    int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err)
+		err = hctr2_finish(req);
+
+	skcipher_request_complete(req, err);
+}
+
+static int hctr2_crypt(struct skcipher_request *req, bool enc)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+	struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
+	u8 digest[POLYVAL_DIGEST_SIZE];
+	int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	int err;
+
+	// Requests must be at least one block
+	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
+		return -EINVAL;
+
+	// Copy M (or U) into a temporary buffer
+	scatterwalk_map_and_copy(rctx->first_block, req->src,
+				 0, BLOCKCIPHER_BLOCK_SIZE, 0);
+
+	// Create scatterlists for N and V
+	rctx->bulk_part_src = scatterwalk_ffwd(rctx->sg_src, req->src,
+					       BLOCKCIPHER_BLOCK_SIZE);
+	rctx->bulk_part_dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
+					       BLOCKCIPHER_BLOCK_SIZE);
+
+	// MM = M ^ H(T || N)
+	// or UU = U ^ H(T || V)
+	err = hctr2_hash_tweak(req);
+	if (err)
+		return err;
+	err = hctr2_hash_message(req, rctx->bulk_part_src, digest);
+	if (err)
+		return err;
+	crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
+
+	// UU = E(MM)
+	// or MM = D(UU)
+	if (enc)
+		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->first_block,
+					  digest);
+	else
+		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->first_block,
+					  digest);
+
+	// S = MM ^ UU ^ L
+	crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
+	crypto_xor_cpy(rctx->xctr_iv, digest, tctx->L, BLOCKCIPHER_BLOCK_SIZE);
+
+	// V = XCTR(S, N)
+	// or N = XCTR(S, V)
+	skcipher_request_set_tfm(&rctx->u.xctr_req, tctx->xctr);
+	skcipher_request_set_crypt(&rctx->u.xctr_req, rctx->bulk_part_src,
+				   rctx->bulk_part_dst, bulk_len,
+				   rctx->xctr_iv);
+	skcipher_request_set_callback(&rctx->u.xctr_req,
+				      req->base.flags,
+				      hctr2_xctr_done, req);
+	return crypto_skcipher_encrypt(&rctx->u.xctr_req) ?:
+		hctr2_finish(req);
+}
+
+static int hctr2_encrypt(struct skcipher_request *req)
+{
+	return hctr2_crypt(req, true);
+}
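Finally, a minimal sketch of encrypting one message with the new mode through the skcipher API, assuming the "hctr2(aes)" instantiation that this template provides. This is illustrative only and not part of the merge; note the 32-byte IV, through which HCTR2 receives its tweak (TWEAK_SIZE above), and that requests must be at least one 16-byte block:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int __init hctr2_demo_init(void)
{
	u8 key[32] = { 0 };	/* AES-256 key, shared by E, XCTR and POLYVAL derivation */
	u8 iv[32] = { 0 };	/* TWEAK_SIZE-byte tweak, passed via the IV slot */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("hctr2(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(512, GFP_KERNEL);	/* length-preserving: any size >= 16 works */
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 512);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, 512, iv);
	/* May return -EINPROGRESS for async drivers; a robust caller
	 * would use crypto_wait_req() with DECLARE_CRYPTO_WAIT(). */
	err = crypto_skcipher_encrypt(req);
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}
module_init(hctr2_demo_init);
MODULE_LICENSE("GPL");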
