author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-19 10:28:41 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-19 10:28:41 -0800
commit    02b2f1a7b8ef340e57cae640a52ec7199b0b887d (patch)
tree      5f988798262afdeda17dc8f0cd6882d30621de5d /arch/powerpc
parent    1af29b34ea7f63c3e7225c324ffa86c9748874e4 (diff)
parent    4223414efeae3a8efb4da1e9c9c52a1a44c1c5bf (diff)
Merge tag 'v6.13-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add sig driver API
   - Remove signing/verification from akcipher API
   - Move crypto_simd_disabled_for_test to lib/crypto
   - Add WARN_ON for return values from driver that indicates memory
     corruption

  Algorithms:
   - Provide crc32-arch and crc32c-arch through Crypto API
   - Optimise crc32c code size on x86
   - Optimise crct10dif on arm/arm64
   - Optimise p10-aes-gcm on powerpc
   - Optimise aegis128 on x86
   - Output full sample from test interface in jitter RNG
   - Retry without padata when it fails in pcrypt

  Drivers:
   - Add support for Airoha EN7581 TRNG
   - Add support for STM32MP25x platforms in stm32
   - Enable iproc-r200 RNG driver on BCMBCA
   - Add Broadcom BCM74110 RNG driver"

* tag 'v6.13-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (112 commits)
  crypto: marvell/cesa - fix uninit value for struct mv_cesa_op_ctx
  crypto: cavium - Fix an error handling path in cpt_ucode_load_fw()
  crypto: aesni - Move back to module_init
  crypto: lib/mpi - Export mpi_set_bit
  crypto: aes-gcm-p10 - Use the correct bit to test for P10
  hwrng: amd - remove reference to removed PPC_MAPLE config
  crypto: arm/crct10dif - Implement plain NEON variant
  crypto: arm/crct10dif - Macroify PMULL asm code
  crypto: arm/crct10dif - Use existing mov_l macro instead of __adrl
  crypto: arm64/crct10dif - Remove remaining 64x64 PMULL fallback code
  crypto: arm64/crct10dif - Use faster 16x64 bit polynomial multiply
  crypto: arm64/crct10dif - Remove obsolete chunking logic
  crypto: bcm - add error check in the ahash_hmac_init function
  crypto: caam - add error check to caam_rsa_set_priv_key_form
  hwrng: bcm74110 - Add Broadcom BCM74110 RNG driver
  dt-bindings: rng: add binding for BCM74110 RNG
  padata: Clean up in padata_do_multithreaded()
  crypto: inside-secure - Fix the return value of safexcel_xcbcmac_cra_init()
  crypto: qat - Fix missing destroy_workqueue in adf_init_aer()
  crypto: rsassa-pkcs1 - Reinstate support for legacy protocols
  ...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/crypto/Kconfig            |    2
-rw-r--r--  arch/powerpc/crypto/aes-gcm-p10-glue.c |  141
-rw-r--r--  arch/powerpc/crypto/aes-gcm-p10.S      | 2421
3 files changed, 1187 insertions, 1377 deletions
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 46a4c85e85e2..951a43726461 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -107,12 +107,12 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10
tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
- depends on BROKEN
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
+ select CRYPTO_SIMD
help
AEAD cipher: AES cipher algorithms (FIPS-197)
GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D)
diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
index f66ad56e765f..f37b3d13fc53 100644
--- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
+++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
@@ -8,6 +8,7 @@
#include <linux/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
+#include <crypto/gcm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
@@ -24,6 +25,7 @@
#define PPC_ALIGN 16
#define GCM_IV_SIZE 12
+#define RFC4106_NONCE_SIZE 4
MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
@@ -31,7 +33,7 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");
asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
- void *key);
+ void *key);
asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
@@ -39,7 +41,8 @@ asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
- unsigned char *aad, unsigned int alen);
+ unsigned char *aad, unsigned int alen);
+asmlinkage void gcm_update(u8 *iv, void *Xi);
struct aes_key {
u8 key[AES_MAX_KEYLENGTH];
@@ -52,6 +55,7 @@ struct gcm_ctx {
u8 aad_hash[16];
u64 aadLen;
u64 Plen; /* offset 56 - used in aes_p10_gcm_{en/de}crypt */
+ u8 pblock[16];
};
struct Hash_ctx {
u8 H[16]; /* subkey */
@@ -60,17 +64,20 @@ struct Hash_ctx {
struct p10_aes_gcm_ctx {
struct aes_key enc_key;
+ u8 nonce[RFC4106_NONCE_SIZE];
};
static void vsx_begin(void)
{
preempt_disable();
+ pagefault_disable();
enable_kernel_vsx();
}
static void vsx_end(void)
{
disable_kernel_vsx();
+ pagefault_enable();
preempt_enable();
}
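
The vsx_begin()/vsx_end() pair now disables page faults in addition to preemption, so nothing between the two calls may sleep or touch memory that could fault; this is also why the associated data is linearized before the main loop. A minimal usage sketch in the driver's own terms (kernel context assumed, error handling elided):

        /* All buffers are already linear kernel memory, so no access
         * inside the window can fault while pagefaults are disabled. */
        vsx_begin();
        aes_p10_gcm_encrypt(src, dst, nbytes, &ctx->enc_key,
                            gctx->iv, hash->Htable);
        vsx_end();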
@@ -185,7 +192,7 @@ static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
}
static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -198,7 +205,8 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
return ret ? -EINVAL : 0;
}
-static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
+static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
+ int assoclen, int enc)
{
struct crypto_tfm *tfm = req->base.tfm;
struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -210,7 +218,6 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
struct skcipher_walk walk;
u8 *assocmem = NULL;
u8 *assoc;
- unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
@@ -218,11 +225,12 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
u8 otag[16];
int total_processed = 0;
+ int nbytes;
memset(databuf, 0, sizeof(databuf));
memset(hashbuf, 0, sizeof(hashbuf));
memset(ivbuf, 0, sizeof(ivbuf));
- memcpy(iv, req->iv, GCM_IV_SIZE);
+ memcpy(iv, riv, GCM_IV_SIZE);
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length) {
@@ -257,19 +265,25 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
if (ret)
return ret;
- while (walk.nbytes > 0 && ret == 0) {
+ while ((nbytes = walk.nbytes) > 0 && ret == 0) {
+ u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 buf[AES_BLOCK_SIZE];
+
+ if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(buf, src, nbytes);
vsx_begin();
if (enc)
- aes_p10_gcm_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- walk.nbytes,
+ aes_p10_gcm_encrypt(src, dst, nbytes,
&ctx->enc_key, gctx->iv, hash->Htable);
else
- aes_p10_gcm_decrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- walk.nbytes,
+ aes_p10_gcm_decrypt(src, dst, nbytes,
&ctx->enc_key, gctx->iv, hash->Htable);
+
+ if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+
vsx_end();
total_processed += walk.nbytes;
@@ -281,6 +295,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
/* Finalize hash */
vsx_begin();
+ gcm_update(gctx->iv, hash->Htable);
finish_tag(gctx, hash, total_processed);
vsx_end();
@@ -302,17 +317,63 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
return 0;
}
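
The reworked walk loop stages a trailing partial block through an on-stack buffer so the assembly core only ever reads and writes whole 16-byte blocks. A self-contained user-space C sketch of the same bounce-buffer pattern; cipher_blocks() here is a hypothetical stand-in for the real aes_p10_gcm_{en,de}crypt core:

        #include <stdint.h>
        #include <string.h>

        #define AES_BLOCK_SIZE 16

        /* Placeholder transform standing in for the AES/GCM core. */
        static void cipher_blocks(uint8_t *dst, const uint8_t *src, size_t len)
        {
                for (size_t i = 0; i < len; i++)
                        dst[i] = src[i] ^ 0xAA;
        }

        static void crypt_step(uint8_t *dst, uint8_t *src, size_t nbytes)
        {
                uint8_t buf[AES_BLOCK_SIZE];
                uint8_t *s = src, *d = dst;

                /* Short tail: bounce through a block-sized buffer... */
                if (nbytes > 0 && nbytes < AES_BLOCK_SIZE)
                        s = d = memcpy(buf, src, nbytes);

                cipher_blocks(d, s, nbytes);

                /* ...then copy the result back to the real destination. */
                if (d == buf)
                        memcpy(dst, buf, nbytes);
        }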
+static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey,
+ unsigned int keylen)
+{
+ struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int err;
+
+ keylen -= RFC4106_NONCE_SIZE;
+ err = p10_aes_gcm_setkey(tfm, inkey, keylen);
+ if (err)
+ return err;
+
+ memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE);
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ u8 iv[AES_BLOCK_SIZE];
+
+ memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
+ memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
+
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 1);
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ u8 iv[AES_BLOCK_SIZE];
+
+ memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
+ memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
+
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 0);
+}
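
Both rfc4106 entry points build the 12-byte GCM IV the same way: the 4-byte salt saved from the tail of the key at setkey time, followed by the 8-byte explicit IV carried in each request; GCM_RFC4106_IV_SIZE is subtracted from assoclen because the explicit IV sits in the request's AAD region. A self-contained sketch of that nonce layout (RFC 4106, section 4):

        #include <stdint.h>
        #include <string.h>

        #define RFC4106_NONCE_SIZE   4  /* salt, from the tail of the key */
        #define GCM_RFC4106_IV_SIZE  8  /* explicit IV, one per request   */

        /* 12-byte GCM IV = salt || explicit IV. */
        static void rfc4106_build_iv(uint8_t iv[12],
                                     const uint8_t nonce[RFC4106_NONCE_SIZE],
                                     const uint8_t req_iv[GCM_RFC4106_IV_SIZE])
        {
                memcpy(iv, nonce, RFC4106_NONCE_SIZE);
                memcpy(iv + RFC4106_NONCE_SIZE, req_iv, GCM_RFC4106_IV_SIZE);
        }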
+
static int p10_aes_gcm_encrypt(struct aead_request *req)
{
- return p10_aes_gcm_crypt(req, 1);
+ return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 1);
}
static int p10_aes_gcm_decrypt(struct aead_request *req)
{
- return p10_aes_gcm_crypt(req, 0);
+ return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 0);
}
-static struct aead_alg gcm_aes_alg = {
+static struct aead_alg gcm_aes_algs[] = {{
.ivsize = GCM_IV_SIZE,
.maxauthsize = 16,
@@ -321,23 +382,57 @@ static struct aead_alg gcm_aes_alg = {
.encrypt = p10_aes_gcm_encrypt,
.decrypt = p10_aes_gcm_decrypt,
- .base.cra_name = "gcm(aes)",
- .base.cra_driver_name = "aes_gcm_p10",
+ .base.cra_name = "__gcm(aes)",
+ .base.cra_driver_name = "__aes_gcm_p10",
.base.cra_priority = 2100,
.base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx),
+ .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx)+
+ 4 * sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
-};
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+}, {
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = 16,
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = rfc4106_encrypt,
+ .decrypt = rfc4106_decrypt,
+
+ .base.cra_name = "__rfc4106(gcm(aes))",
+ .base.cra_driver_name = "__rfc4106_aes_gcm_p10",
+ .base.cra_priority = 2100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx) +
+ 4 * sizeof(u64[2]),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+}};
+
+static struct simd_aead_alg *p10_simd_aeads[ARRAY_SIZE(gcm_aes_algs)];
static int __init p10_init(void)
{
- return crypto_register_aead(&gcm_aes_alg);
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ return 0;
+
+ ret = simd_register_aeads_compat(gcm_aes_algs,
+ ARRAY_SIZE(gcm_aes_algs),
+ p10_simd_aeads);
+ if (ret) {
+ simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
+ p10_simd_aeads);
+ return ret;
+ }
+ return 0;
}
static void __exit p10_exit(void)
{
- crypto_unregister_aead(&gcm_aes_alg);
+ simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
+ p10_simd_aeads);
}
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
+module_init(p10_init);
module_exit(p10_exit);
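
With CRYPTO_ALG_INTERNAL set and the "__" name prefix, the bare implementations can no longer be allocated by users directly; simd_register_aeads_compat() wraps each one in a SIMD helper that defers to a cryptd context whenever VSX is not usable (e.g. in hard IRQ context). Callers keep requesting the generic template name; a sketch (kernel context assumed):

        /* Resolves to the highest-priority provider; on Power10 that is
         * the SIMD wrapper around __aes_gcm_p10 registered above. */
        struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);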
diff --git a/arch/powerpc/crypto/aes-gcm-p10.S b/arch/powerpc/crypto/aes-gcm-p10.S
index a51f4b265308..89f50eef3512 100644
--- a/arch/powerpc/crypto/aes-gcm-p10.S
+++ b/arch/powerpc/crypto/aes-gcm-p10.S
@@ -1,42 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
- #
- # Accelerated AES-GCM stitched implementation for ppc64le.
- #
- # Copyright 2022- IBM Inc. All rights reserved
- #
- #===================================================================================
- # Written by Danny Tsen <dtsen@linux.ibm.com>
- #
- # GHASH is based on the Karatsuba multiplication method.
- #
- # Xi xor X1
- #
- # X1 * H^4 + X2 * H^3 + X3 * H^2 + X4 * H =
- # (X1.h * H4.h + X1.l * H4.l + X1 * H4) +
- # (X2.h * H3.h + X2.l * H3.l + X2 * H3) +
- # (X3.h * H2.h + X3.l * H2.l + X3 * H2) +
- # (X4.h * H.h + X4.l * H.l + X4 * H)
- #
- # Xi = v0
- # H Poly = v2
- # Hash keys = v3 - v14
- # ( H.l, H, H.h)
- # ( H^2.l, H^2, H^2.h)
- # ( H^3.l, H^3, H^3.h)
- # ( H^4.l, H^4, H^4.h)
- #
- # v30 is IV
- # v31 - counter 1
- #
- # AES used,
- # vs0 - vs14 for round keys
- # v15, v16, v17, v18, v19, v20, v21, v22 for 8 blocks (encrypted)
- #
- # This implementation uses stitched AES-GCM approach to improve overall performance.
- # AES is implemented with 8x blocks and GHASH is using 2 4x blocks.
- #
- # ===================================================================================
- #
+#
+# Accelerated AES-GCM stitched implementation for ppc64le.
+#
+# Copyright 2024- IBM Inc.
+#
+#===================================================================================
+# Written by Danny Tsen <dtsen@us.ibm.com>
+#
+# GHASH is based on the Karatsuba multiplication method.
+#
+# Xi xor X1
+#
+# X1 * H^4 + X2 * H^3 + X3 * H^2 + X4 * H =
+# (X1.h * H4.h + X1.l * H4.l + X1 * H4) +
+# (X2.h * H3.h + X2.l * H3.l + X2 * H3) +
+# (X3.h * H2.h + X3.l * H2.l + X3 * H2) +
+# (X4.h * H.h + X4.l * H.l + X4 * H)
+#
+# Xi = v0
+# H Poly = v2
+# Hash keys = v3 - v14
+# ( H.l, H, H.h)
+# ( H^2.l, H^2, H^2.h)
+# ( H^3.l, H^3, H^3.h)
+# ( H^4.l, H^4, H^4.h)
+#
+# v30 is IV
+# v31 - counter 1
+#
+# AES used,
+# vs0 - round key 0
+# v15, v16, v17, v18, v19, v20, v21, v22 for 8 blocks (encrypted)
+#
+# This implementation uses stitched AES-GCM approach to improve overall performance.
+# AES is implemented with 8x blocks and GHASH is using 2 4x blocks.
+#
+# ===================================================================================
+#
#include <asm/ppc_asm.h>
#include <linux/linkage.h>
@@ -44,483 +44,224 @@
.machine "any"
.text
- # 4x loops
- # v15 - v18 - input states
- # vs1 - vs9 - round keys
- #
-.macro Loop_aes_middle4x
- xxlor 19+32, 1, 1
- xxlor 20+32, 2, 2
- xxlor 21+32, 3, 3
- xxlor 22+32, 4, 4
-
- vcipher 15, 15, 19
- vcipher 16, 16, 19
- vcipher 17, 17, 19
- vcipher 18, 18, 19
-
- vcipher 15, 15, 20
- vcipher 16, 16, 20
- vcipher 17, 17, 20
- vcipher 18, 18, 20
-
- vcipher 15, 15, 21
- vcipher 16, 16, 21
- vcipher 17, 17, 21
- vcipher 18, 18, 21
-
- vcipher 15, 15, 22
- vcipher 16, 16, 22
- vcipher 17, 17, 22
- vcipher 18, 18, 22
-
- xxlor 19+32, 5, 5
- xxlor 20+32, 6, 6
- xxlor 21+32, 7, 7
- xxlor 22+32, 8, 8
-
- vcipher 15, 15, 19
- vcipher 16, 16, 19
- vcipher 17, 17, 19
- vcipher 18, 18, 19
-
- vcipher 15, 15, 20
- vcipher 16, 16, 20
- vcipher 17, 17, 20
- vcipher 18, 18, 20
-
- vcipher 15, 15, 21
- vcipher 16, 16, 21
- vcipher 17, 17, 21
- vcipher 18, 18, 21
-
- vcipher 15, 15, 22
- vcipher 16, 16, 22
- vcipher 17, 17, 22
- vcipher 18, 18, 22
-
- xxlor 23+32, 9, 9
- vcipher 15, 15, 23
- vcipher 16, 16, 23
- vcipher 17, 17, 23
- vcipher 18, 18, 23
+.macro SAVE_GPR GPR OFFSET FRAME
+ std \GPR,\OFFSET(\FRAME)
.endm
- # 8x loops
- # v15 - v22 - input states
- # vs1 - vs9 - round keys
- #
-.macro Loop_aes_middle8x
- xxlor 23+32, 1, 1
- xxlor 24+32, 2, 2
- xxlor 25+32, 3, 3
- xxlor 26+32, 4, 4
-
- vcipher 15, 15, 23
- vcipher 16, 16, 23
- vcipher 17, 17, 23
- vcipher 18, 18, 23
- vcipher 19, 19, 23
- vcipher 20, 20, 23
- vcipher 21, 21, 23
- vcipher 22, 22, 23
-
- vcipher 15, 15, 24
- vcipher 16, 16, 24
- vcipher 17, 17, 24
- vcipher 18, 18, 24
- vcipher 19, 19, 24
- vcipher 20, 20, 24
- vcipher 21, 21, 24
- vcipher 22, 22, 24
-
- vcipher 15, 15, 25
- vcipher 16, 16, 25
- vcipher 17, 17, 25
- vcipher 18, 18, 25
- vcipher 19, 19, 25
- vcipher 20, 20, 25
- vcipher 21, 21, 25
- vcipher 22, 22, 25
-
- vcipher 15, 15, 26
- vcipher 16, 16, 26
- vcipher 17, 17, 26
- vcipher 18, 18, 26
- vcipher 19, 19, 26
- vcipher 20, 20, 26
- vcipher 21, 21, 26
- vcipher 22, 22, 26
-
- xxlor 23+32, 5, 5
- xxlor 24+32, 6, 6
- xxlor 25+32, 7, 7
- xxlor 26+32, 8, 8
-
- vcipher 15, 15, 23
- vcipher 16, 16, 23
- vcipher 17, 17, 23
- vcipher 18, 18, 23
- vcipher 19, 19, 23
- vcipher 20, 20, 23
- vcipher 21, 21, 23
- vcipher 22, 22, 23
-
- vcipher 15, 15, 24
- vcipher 16, 16, 24
- vcipher 17, 17, 24
- vcipher 18, 18, 24
- vcipher 19, 19, 24
- vcipher 20, 20, 24
- vcipher 21, 21, 24
- vcipher 22, 22, 24
-
- vcipher 15, 15, 25
- vcipher 16, 16, 25
- vcipher 17, 17, 25
- vcipher 18, 18, 25
- vcipher 19, 19, 25
- vcipher 20, 20, 25
- vcipher 21, 21, 25
- vcipher 22, 22, 25
-
- vcipher 15, 15, 26
- vcipher 16, 16, 26
- vcipher 17, 17, 26
- vcipher 18, 18, 26
- vcipher 19, 19, 26
- vcipher 20, 20, 26
- vcipher 21, 21, 26
- vcipher 22, 22, 26
-
- xxlor 23+32, 9, 9
- vcipher 15, 15, 23
- vcipher 16, 16, 23
- vcipher 17, 17, 23
- vcipher 18, 18, 23
- vcipher 19, 19, 23
- vcipher 20, 20, 23
- vcipher 21, 21, 23
- vcipher 22, 22, 23
+.macro SAVE_VRS VRS OFFSET FRAME
+ stxv \VRS+32, \OFFSET(\FRAME)
.endm
-.macro Loop_aes_middle_1x
- xxlor 19+32, 1, 1
- xxlor 20+32, 2, 2
- xxlor 21+32, 3, 3
- xxlor 22+32, 4, 4
-
- vcipher 15, 15, 19
- vcipher 15, 15, 20
- vcipher 15, 15, 21
- vcipher 15, 15, 22
-
- xxlor 19+32, 5, 5
- xxlor 20+32, 6, 6
- xxlor 21+32, 7, 7
- xxlor 22+32, 8, 8
-
- vcipher 15, 15, 19
- vcipher 15, 15, 20
- vcipher 15, 15, 21
- vcipher 15, 15, 22
-
- xxlor 19+32, 9, 9
- vcipher 15, 15, 19
+.macro RESTORE_GPR GPR OFFSET FRAME
+ ld \GPR,\OFFSET(\FRAME)
.endm
- #
- # Compute 4x hash values based on Karatsuba method.
- #
-.macro ppc_aes_gcm_ghash
- vxor 15, 15, 0
-
- vpmsumd 23, 12, 15 # H4.L * X.L
- vpmsumd 24, 9, 16
- vpmsumd 25, 6, 17
- vpmsumd 26, 3, 18
-
- vxor 23, 23, 24
- vxor 23, 23, 25
- vxor 23, 23, 26 # L
-
- vpmsumd 24, 13, 15 # H4.L * X.H + H4.H * X.L
- vpmsumd 25, 10, 16 # H3.L * X1.H + H3.H * X1.L
- vpmsumd 26, 7, 17
- vpmsumd 27, 4, 18
-
- vxor 24, 24, 25
- vxor 24, 24, 26
- vxor 24, 24, 27 # M
-
- # sum hash and reduction with H Poly
- vpmsumd 28, 23, 2 # reduction
-
- vxor 29, 29, 29
- vsldoi 26, 24, 29, 8 # mL
- vsldoi 29, 29, 24, 8 # mH
- vxor 23, 23, 26 # mL + L
-
- vsldoi 23, 23, 23, 8 # swap
- vxor 23, 23, 28
-
- vpmsumd 24, 14, 15 # H4.H * X.H
- vpmsumd 25, 11, 16
- vpmsumd 26, 8, 17
- vpmsumd 27, 5, 18
-
- vxor 24, 24, 25
- vxor 24, 24, 26
- vxor 24, 24, 27
-
- vxor 24, 24, 29
-
- # sum hash and reduction with H Poly
- vsldoi 27, 23, 23, 8 # swap
- vpmsumd 23, 23, 2
- vxor 27, 27, 24
- vxor 23, 23, 27
-
- xxlor 32, 23+32, 23+32 # update hash
-
+.macro RESTORE_VRS VRS OFFSET FRAME
+ lxv \VRS+32, \OFFSET(\FRAME)
.endm
- #
- # Combine two 4x ghash
- # v15 - v22 - input blocks
- #
-.macro ppc_aes_gcm_ghash2_4x
- # first 4x hash
- vxor 15, 15, 0 # Xi + X
-
- vpmsumd 23, 12, 15 # H4.L * X.L
- vpmsumd 24, 9, 16
- vpmsumd 25, 6, 17
- vpmsumd 26, 3, 18
-
- vxor 23, 23, 24
- vxor 23, 23, 25
- vxor 23, 23, 26 # L
-
- vpmsumd 24, 13, 15 # H4.L * X.H + H4.H * X.L
- vpmsumd 25, 10, 16 # H3.L * X1.H + H3.H * X1.L
- vpmsumd 26, 7, 17
- vpmsumd 27, 4, 18
-
- vxor 24, 24, 25
- vxor 24, 24, 26
-
- # sum hash and reduction with H Poly
- vpmsumd 28, 23, 2 # reduction
-
- vxor 29, 29, 29
-
- vxor 24, 24, 27 # M
- vsldoi 26, 24, 29, 8 # mL
- vsldoi 29, 29, 24, 8 # mH
- vxor 23, 23, 26 # mL + L
-
- vsldoi 23, 23, 23, 8 # swap
- vxor 23, 23, 28
+.macro SAVE_REGS
+ mflr 0
+ std 0, 16(1)
+ stdu 1,-512(1)
+
+ SAVE_GPR 14, 112, 1
+ SAVE_GPR 15, 120, 1
+ SAVE_GPR 16, 128, 1
+ SAVE_GPR 17, 136, 1
+ SAVE_GPR 18, 144, 1
+ SAVE_GPR 19, 152, 1
+ SAVE_GPR 20, 160, 1
+ SAVE_GPR 21, 168, 1
+ SAVE_GPR 22, 176, 1
+ SAVE_GPR 23, 184, 1
+ SAVE_GPR 24, 192, 1
+
+ addi 9, 1, 256
+ SAVE_VRS 20, 0, 9
+ SAVE_VRS 21, 16, 9
+ SAVE_VRS 22, 32, 9
+ SAVE_VRS 23, 48, 9
+ SAVE_VRS 24, 64, 9
+ SAVE_VRS 25, 80, 9
+ SAVE_VRS 26, 96, 9
+ SAVE_VRS 27, 112, 9
+ SAVE_VRS 28, 128, 9
+ SAVE_VRS 29, 144, 9
+ SAVE_VRS 30, 160, 9
+ SAVE_VRS 31, 176, 9
+.endm # SAVE_REGS
- vpmsumd 24, 14, 15 # H4.H * X.H
- vpmsumd 25, 11, 16
- vpmsumd 26, 8, 17
- vpmsumd 27, 5, 18
+.macro RESTORE_REGS
+ addi 9, 1, 256
+ RESTORE_VRS 20, 0, 9
+ RESTORE_VRS 21, 16, 9
+ RESTORE_VRS 22, 32, 9
+ RESTORE_VRS 23, 48, 9
+ RESTORE_VRS 24, 64, 9
+ RESTORE_VRS 25, 80, 9
+ RESTORE_VRS 26, 96, 9
+ RESTORE_VRS 27, 112, 9
+ RESTORE_VRS 28, 128, 9
+ RESTORE_VRS 29, 144, 9
+ RESTORE_VRS 30, 160, 9
+ RESTORE_VRS 31, 176, 9
+
+ RESTORE_GPR 14, 112, 1
+ RESTORE_GPR 15, 120, 1
+ RESTORE_GPR 16, 128, 1
+ RESTORE_GPR 17, 136, 1
+ RESTORE_GPR 18, 144, 1
+ RESTORE_GPR 19, 152, 1
+ RESTORE_GPR 20, 160, 1
+ RESTORE_GPR 21, 168, 1
+ RESTORE_GPR 22, 176, 1
+ RESTORE_GPR 23, 184, 1
+ RESTORE_GPR 24, 192, 1
+
+ addi 1, 1, 512
+ ld 0, 16(1)
+ mtlr 0
+.endm # RESTORE_REGS
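
SAVE_REGS/RESTORE_REGS replace the old hand-unrolled save sequence with a fixed 512-byte frame: the ELFv2 ABI area, then the nonvolatile GPRs r14-r24 at offsets 112-200, then v20-v31 at 256-448, with LR stored at 16(r1) in the caller's frame before the stdu. An assumed C view of that layout, with illustrative field names (offsets taken from the macros above):

        #include <stdint.h>

        struct p10_stack_frame {        /* built by stdu 1,-512(1) */
                uint8_t  abi[112];      /* back chain + ABI save area    */
                uint64_t gpr[11];       /* r14..r24, offsets 112..200    */
                uint8_t  pad[56];       /* unused, 200..256              */
                uint8_t  vr[12][16];    /* v20..v31, offsets 256..448    */
                uint8_t  rest[64];      /* unused tail, 448..512         */
        };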
+
+# 4x loops
+.macro AES_CIPHER_4x _VCIPHER ST r
+ \_VCIPHER \ST, \ST, \r
+ \_VCIPHER \ST+1, \ST+1, \r
+ \_VCIPHER \ST+2, \ST+2, \r
+ \_VCIPHER \ST+3, \ST+3, \r
+.endm
- vxor 24, 24, 25
- vxor 24, 24, 26
- vxor 24, 24, 27 # H
+# 8x loops
+.macro AES_CIPHER_8x _VCIPHER ST r
+ \_VCIPHER \ST, \ST, \r
+ \_VCIPHER \ST+1, \ST+1, \r
+ \_VCIPHER \ST+2, \ST+2, \r
+ \_VCIPHER \ST+3, \ST+3, \r
+ \_VCIPHER \ST+4, \ST+4, \r
+ \_VCIPHER \ST+5, \ST+5, \r
+ \_VCIPHER \ST+6, \ST+6, \r
+ \_VCIPHER \ST+7, \ST+7, \r
+.endm
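
The long unrolled vcipher runs are now generated by these two macros; each application is one AES round key swept across a batch of independent counter blocks. A C model of AES_CIPHER_8x, with aes_round() as a placeholder for the vcipher instruction (not real AES, model only):

        #include <stdint.h>

        typedef struct { uint8_t b[16]; } vec128;    /* models one VR */

        /* Placeholder for vcipher: one AES round on one block. */
        static vec128 aes_round(vec128 st, vec128 rk)
        {
                for (int i = 0; i < 16; i++)
                        st.b[i] ^= rk.b[i];          /* stand-in only */
                return st;
        }

        /* One round key applied to eight in-flight blocks back to back,
         * which keeps the vector pipeline full. */
        static void aes_cipher_8x(vec128 st[8], vec128 rk)
        {
                for (int i = 0; i < 8; i++)
                        st[i] = aes_round(st[i], rk);
        }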
- vxor 24, 24, 29 # H + mH
+.macro LOOP_8AES_STATE
+ xxlor 32+23, 1, 1
+ xxlor 32+24, 2, 2
+ xxlor 32+25, 3, 3
+ xxlor 32+26, 4, 4
+ AES_CIPHER_8x vcipher, 15, 23
+ AES_CIPHER_8x vcipher, 15, 24
+ AES_CIPHER_8x vcipher, 15, 25
+ AES_CIPHER_8x vcipher, 15, 26
+ xxlor 32+23, 5, 5
+ xxlor 32+24, 6, 6
+ xxlor 32+25, 7, 7
+ xxlor 32+26, 8, 8
+ AES_CIPHER_8x vcipher, 15, 23
+ AES_CIPHER_8x vcipher, 15, 24
+ AES_CIPHER_8x vcipher, 15, 25
+ AES_CIPHER_8x vcipher, 15, 26
+.endm
- # sum hash and reduction with H Poly
- vsldoi 27, 23, 23, 8 # swap
- vpmsumd 23, 23, 2
- vxor 27, 27, 24
- vxor 27, 23, 27 # 1st Xi
-
- # 2nd 4x hash
- vpmsumd 24, 9, 20
- vpmsumd 25, 6, 21
- vpmsumd 26, 3, 22
- vxor 19, 19, 27 # Xi + X
- vpmsumd 23, 12, 19 # H4.L * X.L
-
- vxor 23, 23, 24
- vxor 23, 23, 25
- vxor 23, 23, 26 # L
-
- vpmsumd 24, 13, 19 # H4.L * X.H + H4.H * X.L
- vpmsumd 25, 10, 20 # H3.L * X1.H + H3.H * X1.L
- vpmsumd 26, 7, 21
- vpmsumd 27, 4, 22
-
- vxor 24, 24, 25
- vxor 24, 24, 26
+#
+# PPC_GHASH4x(H, S1, S2, S3, S4): Compute 4x hash values based on Karatsuba method.
+# H: returning digest
+# S#: states
+#
+# S1 should xor with the previous digest
+#
+# Xi = v0
+# H Poly = v2
+# Hash keys = v3 - v14
+# Scratch: v23 - v29
+#
+.macro PPC_GHASH4x H S1 S2 S3 S4
+
+ vpmsumd 23, 12, \S1 # H4.L * X.L
+ vpmsumd 24, 9, \S2
+ vpmsumd 25, 6, \S3
+ vpmsumd 26, 3, \S4
+
+ vpmsumd 27, 13, \S1 # H4.L * X.H + H4.H * X.L
+ vpmsumd 28, 10, \S2 # H3.L * X1.H + H3.H * X1.L
+
+ vxor 23, 23, 24
+ vxor 23, 23, 25
+ vxor 23, 23, 26 # L
+
+ vxor 24, 27, 28
+ vpmsumd 25, 7, \S3
+ vpmsumd 26, 4, \S4
+
+ vxor 24, 24, 25
+ vxor 24, 24, 26 # M
# sum hash and reduction with H Poly
- vpmsumd 28, 23, 2 # reduction
-
- vxor 29, 29, 29
+ vpmsumd 28, 23, 2 # reduction
- vxor 24, 24, 27 # M
- vsldoi 26, 24, 29, 8 # mL
- vsldoi 29, 29, 24, 8 # mH
- vxor 23, 23, 26 # mL + L
+ vxor 1, 1, 1
+ vsldoi 25, 24, 1, 8 # mL
+ vsldoi 1, 1, 24, 8 # mH
+ vxor 23, 23, 25 # mL + L
- vsldoi 23, 23, 23, 8 # swap
- vxor 23, 23, 28
+ # This performs swap and xor like,
+ # vsldoi 23, 23, 23, 8 # swap
+ # vxor 23, 23, 28
+ xxlor 32+25, 10, 10
+ vpermxor 23, 23, 28, 25
- vpmsumd 24, 14, 19 # H4.H * X.H
- vpmsumd 25, 11, 20
- vpmsumd 26, 8, 21
- vpmsumd 27, 5, 22
+ vpmsumd 26, 14, \S1 # H4.H * X.H
+ vpmsumd 27, 11, \S2
+ vpmsumd 28, 8, \S3
+ vpmsumd 29, 5, \S4
- vxor 24, 24, 25
- vxor 24, 24, 26
- vxor 24, 24, 27 # H
+ vxor 24, 26, 27
+ vxor 24, 24, 28
+ vxor 24, 24, 29
- vxor 24, 24, 29 # H + mH
+ vxor 24, 24, 1
# sum hash and reduction with H Poly
- vsldoi 27, 23, 23, 8 # swap
- vpmsumd 23, 23, 2
- vxor 27, 27, 24
- vxor 23, 23, 27
-
- xxlor 32, 23+32, 23+32 # update hash
-
+ vsldoi 25, 23, 23, 8 # swap
+ vpmsumd 23, 23, 2
+ vxor 27, 25, 24
+ vxor \H, 23, 27
.endm
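
PPC_GHASH4x folds four blocks into the digest in one step using the precomputed key powers H^4..H, and replaces the old vsldoi-plus-vxor swap with a single vpermxor. The macro expects S1 already XORed with the previous digest; a C model that performs that fold explicitly, with gf128_mul() as a placeholder for the vpmsumd-based Karatsuba multiply-and-reduce:

        #include <stdint.h>

        typedef struct { uint64_t hi, lo; } be128;

        static be128 gf128_add(be128 a, be128 b)
        {
                /* addition in GF(2^128) is XOR */
                return (be128){ a.hi ^ b.hi, a.lo ^ b.lo };
        }

        static be128 gf128_mul(be128 a, be128 h)
        {
                /* Placeholder body so the sketch compiles: the real code
                 * does a carry-less 128x128 multiply plus reduction with
                 * the GHASH polynomial. */
                return gf128_add(a, h);
        }

        /* Xi' = (S1 + Xi)*H^4 + S2*H^3 + S3*H^2 + S4*H */
        static be128 ghash_4x(be128 Xi, const be128 S[4], const be128 Hpow[4])
        {
                be128 acc = gf128_mul(gf128_add(S[0], Xi), Hpow[0]);

                acc = gf128_add(acc, gf128_mul(S[1], Hpow[1]));
                acc = gf128_add(acc, gf128_mul(S[2], Hpow[2]));
                acc = gf128_add(acc, gf128_mul(S[3], Hpow[3]));
                return acc;
        }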
- #
- # Compute update single hash
- #
-.macro ppc_update_hash_1x
- vxor 28, 28, 0
-
- vxor 19, 19, 19
+#
+# Compute update single ghash
+# scratch: v1, v22..v27
+#
+.macro PPC_GHASH1x H S1
- vpmsumd 22, 3, 28 # L
- vpmsumd 23, 4, 28 # M
- vpmsumd 24, 5, 28 # H
+ vxor 1, 1, 1
- vpmsumd 27, 22, 2 # reduction
+ vpmsumd 22, 3, \S1 # L
+ vpmsumd 23, 4, \S1 # M
+ vpmsumd 24, 5, \S1 # H
- vsldoi 25, 23, 19, 8 # mL
- vsldoi 26, 19, 23, 8 # mH
- vxor 22, 22, 25 # LL + LL
- vxor 24, 24, 26 # HH + HH
+ vpmsumd 27, 22, 2 # reduction
- vsldoi 22, 22, 22, 8 # swap
- vxor 22, 22, 27
+ vsldoi 25, 23, 1, 8 # mL
+ vsldoi 26, 1, 23, 8 # mH
+ vxor 22, 22, 25 # LL + LL
+ vxor 24, 24, 26 # HH + HH
- vsldoi 20, 22, 22, 8 # swap
- vpmsumd 22, 22, 2 # reduction
- vxor 20, 20, 24
- vxor 22, 22, 20
+ xxlor 32+25, 10, 10
+ vpermxor 22, 22, 27, 25
- vmr 0, 22 # update hash
-
-.endm
-
-.macro SAVE_REGS
- stdu 1,-640(1)
- mflr 0
-
- std 14,112(1)
- std 15,120(1)
- std 16,128(1)
- std 17,136(1)
- std 18,144(1)
- std 19,152(1)
- std 20,160(1)
- std 21,168(1)
- li 9, 256
- stvx 20, 9, 1
- addi 9, 9, 16
- stvx 21, 9, 1
- addi 9, 9, 16
- stvx 22, 9, 1
- addi 9, 9, 16
- stvx 23, 9, 1
- addi 9, 9, 16
- stvx 24, 9, 1
- addi 9, 9, 16
- stvx 25, 9, 1
- addi 9, 9, 16
- stvx 26, 9, 1
- addi 9, 9, 16
- stvx 27, 9, 1
- addi 9, 9, 16
- stvx 28, 9, 1
- addi 9, 9, 16
- stvx 29, 9, 1
- addi 9, 9, 16
- stvx 30, 9, 1
- addi 9, 9, 16
- stvx 31, 9, 1
- stxv 14, 464(1)
- stxv 15, 480(1)
- stxv 16, 496(1)
- stxv 17, 512(1)
- stxv 18, 528(1)
- stxv 19, 544(1)
- stxv 20, 560(1)
- stxv 21, 576(1)
- stxv 22, 592(1)
- std 0, 656(1)
-.endm
-
-.macro RESTORE_REGS
- lxv 14, 464(1)
- lxv 15, 480(1)
- lxv 16, 496(1)
- lxv 17, 512(1)
- lxv 18, 528(1)
- lxv 19, 544(1)
- lxv 20, 560(1)
- lxv 21, 576(1)
- lxv 22, 592(1)
- li 9, 256
- lvx 20, 9, 1
- addi 9, 9, 16
- lvx 21, 9, 1
- addi 9, 9, 16
- lvx 22, 9, 1
- addi 9, 9, 16
- lvx 23, 9, 1
- addi 9, 9, 16
- lvx 24, 9, 1
- addi 9, 9, 16
- lvx 25, 9, 1
- addi 9, 9, 16
- lvx 26, 9, 1
- addi 9, 9, 16
- lvx 27, 9, 1
- addi 9, 9, 16
- lvx 28, 9, 1
- addi 9, 9, 16
- lvx 29, 9, 1
- addi 9, 9, 16
- lvx 30, 9, 1
- addi 9, 9, 16
- lvx 31, 9, 1
-
- ld 0, 656(1)
- ld 14,112(1)
- ld 15,120(1)
- ld 16,128(1)
- ld 17,136(1)
- ld 18,144(1)
- ld 19,152(1)
- ld 20,160(1)
- ld 21,168(1)
-
- mtlr 0
- addi 1, 1, 640
+ vsldoi 23, 22, 22, 8 # swap
+ vpmsumd 22, 22, 2 # reduction
+ vxor 23, 23, 24
+ vxor \H, 22, 23
.endm
+#
+# LOAD_HASH_TABLE
+# Xi = v0
+# H Poly = v2
+# Hash keys = v3 - v14
+#
.macro LOAD_HASH_TABLE
# Load Xi
lxvb16x 32, 0, 8 # load Xi
@@ -557,657 +298,434 @@
lxvd2x 14+32, 10, 8 # H^4h
.endm
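
LOAD_HASH_TABLE pulls the digest, the reduction polynomial, and the four key powers into v0-v14. An assumed C view of the table behind r8, inferred from the register comments above (field names are illustrative, not from the source):

        #include <stdint.h>

        /* Each key power is stored as the (P.l, P, P.h) triple that the
         * Karatsuba vpmsumd sequences consume. */
        struct p10_gcm_table {
                uint8_t Xi[16];         /* current digest   -> v0        */
                uint8_t Hpoly[16];      /* reduction poly   -> v2        */
                uint8_t H[3][16];       /* H.l,  H,  H.h    -> v3..v5    */
                uint8_t H2[3][16];      /* H^2 triple       -> v6..v8    */
                uint8_t H3[3][16];      /* H^3 triple       -> v9..v11   */
                uint8_t H4[3][16];      /* H^4 triple       -> v12..v14  */
        };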
- #
- # aes_p10_gcm_encrypt (const void *inp, void *out, size_t len,
- # const char *rk, unsigned char iv[16], void *Xip);
- #
- # r3 - inp
- # r4 - out
- # r5 - len
- # r6 - AES round keys
- # r7 - iv and other data
- # r8 - Xi, HPoli, hash keys
- #
- # rounds is at offset 240 in rk
- # Xi is at 0 in gcm_table (Xip).
- #
-_GLOBAL(aes_p10_gcm_encrypt)
-.align 5
-
- SAVE_REGS
-
- LOAD_HASH_TABLE
-
- # initialize ICB: GHASH( IV ), IV - r7
- lxvb16x 30+32, 0, 7 # load IV - v30
-
- mr 12, 5 # length
- li 11, 0 # block index
-
- # counter 1
- vxor 31, 31, 31
- vspltisb 22, 1
- vsldoi 31, 31, 22,1 # counter 1
-
- # load round key to VSR
- lxv 0, 0(6)
- lxv 1, 0x10(6)
- lxv 2, 0x20(6)
- lxv 3, 0x30(6)
- lxv 4, 0x40(6)
- lxv 5, 0x50(6)
- lxv 6, 0x60(6)
- lxv 7, 0x70(6)
- lxv 8, 0x80(6)
- lxv 9, 0x90(6)
- lxv 10, 0xa0(6)
-
- # load rounds - 10 (128), 12 (192), 14 (256)
- lwz 9,240(6)
-
- #
- # vxor state, state, w # addroundkey
- xxlor 32+29, 0, 0
- vxor 15, 30, 29 # IV + round key - add round key 0
-
- cmpdi 9, 10
- beq Loop_aes_gcm_8x
-
- # load 2 more round keys (v11, v12)
- lxv 11, 0xb0(6)
- lxv 12, 0xc0(6)
-
- cmpdi 9, 12
- beq Loop_aes_gcm_8x
-
- # load 2 more round keys (v11, v12, v13, v14)
- lxv 13, 0xd0(6)
- lxv 14, 0xe0(6)
- cmpdi 9, 14
- beq Loop_aes_gcm_8x
-
- b aes_gcm_out
-
-.align 5
-Loop_aes_gcm_8x:
- mr 14, 3
- mr 9, 4
-
- #
- # check partial block
- #
-Continue_partial_check:
- ld 15, 56(7)
- cmpdi 15, 0
- beq Continue
- bgt Final_block
- cmpdi 15, 16
- blt Final_block
-
-Continue:
- # n blocks
- li 10, 128
- divdu 10, 12, 10 # n 128 bytes-blocks
- cmpdi 10, 0
- beq Loop_last_block
-
- vaddudm 30, 30, 31 # IV + counter
- vxor 16, 30, 29
- vaddudm 30, 30, 31
- vxor 17, 30, 29
- vaddudm 30, 30, 31
- vxor 18, 30, 29
- vaddudm 30, 30, 31
- vxor 19, 30, 29
- vaddudm 30, 30, 31
- vxor 20, 30, 29
- vaddudm 30, 30, 31
- vxor 21, 30, 29
- vaddudm 30, 30, 31
- vxor 22, 30, 29
-
- mtctr 10
-
- li 15, 16
- li 16, 32
- li 17, 48
- li 18, 64
- li 19, 80
- li 20, 96
- li 21, 112
-
- lwz 10, 240(6)
-
-Loop_8x_block:
-
- lxvb16x 15, 0, 14 # load block