-rw-r--r--  arch/riscv/Kbuild                                        |   1
-rw-r--r--  arch/riscv/Kconfig                                       |   7
-rw-r--r--  arch/riscv/crypto/Kconfig                                |  93
-rw-r--r--  arch/riscv/crypto/Makefile                               |  23
-rw-r--r--  arch/riscv/crypto/aes-macros.S                           | 156
-rw-r--r--  arch/riscv/crypto/aes-riscv64-glue.c                     | 550
-rw-r--r--  arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S         | 312
-rw-r--r--  arch/riscv/crypto/aes-riscv64-zvkned-zvkb.S              | 146
-rw-r--r--  arch/riscv/crypto/aes-riscv64-zvkned.S                   | 180
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-glue.c                  | 101
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-zvkb.S                  | 294
-rw-r--r--  arch/riscv/crypto/ghash-riscv64-glue.c                   | 168
-rw-r--r--  arch/riscv/crypto/ghash-riscv64-zvkg.S                   |  72
-rw-r--r--  arch/riscv/crypto/sha256-riscv64-glue.c                  | 137
-rw-r--r--  arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S | 225
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-glue.c                  | 133
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S           | 203
-rw-r--r--  arch/riscv/crypto/sm3-riscv64-glue.c                     | 112
-rw-r--r--  arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S               | 123
-rw-r--r--  arch/riscv/crypto/sm4-riscv64-glue.c                     | 107
-rw-r--r--  arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S              | 117
-rw-r--r--  arch/riscv/include/asm/vector.h                          |  11
-rw-r--r--  crypto/Kconfig                                           |   3
23 files changed, 3274 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
index d25ad1c19f88..2c585f7a0b6e 100644
--- a/arch/riscv/Kbuild
+++ b/arch/riscv/Kbuild
@@ -2,6 +2,7 @@
obj-y += kernel/ mm/ net/
obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
+obj-$(CONFIG_CRYPTO) += crypto/
obj-y += errata/
obj-$(CONFIG_KVM) += kvm/
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 60c8fd1a677f..61826240926d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -581,6 +581,13 @@ config TOOLCHAIN_HAS_ZBB
depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900
depends on AS_HAS_OPTION_ARCH
+# This symbol indicates that the toolchain supports all v1.0 vector crypto
+# extensions, including Zvk*, Zvbb, and Zvbc. LLVM added all of these at once.
+# binutils added all except Zvkb, then added Zvkb. So we just check for Zvkb.
+config TOOLCHAIN_HAS_VECTOR_CRYPTO
+ def_bool $(as-instr, .option arch$(comma) +zvkb)
+ depends on AS_HAS_OPTION_ARCH
+
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
depends on TOOLCHAIN_HAS_ZBB
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
new file mode 100644
index 000000000000..2ad44e1d464a
--- /dev/null
+++ b/arch/riscv/crypto/Kconfig
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (riscv)"
+
+config CRYPTO_AES_RISCV64
+ tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_AES
+ select CRYPTO_SKCIPHER
+ help
+ Block cipher: AES cipher algorithms
+ Length-preserving ciphers: AES with ECB, CBC, CTR, XTS
+
+ Architecture: riscv64 using:
+ - Zvkned vector crypto extension
+ - Zvbb vector extension (XTS)
+ - Zvkb vector crypto extension (CTR)
+ - Zvkg vector crypto extension (XTS)
+
+config CRYPTO_CHACHA_RISCV64
+ tristate "Ciphers: ChaCha"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ help
+ Length-preserving ciphers: ChaCha20 stream cipher algorithm
+
+ Architecture: riscv64 using:
+ - Zvkb vector crypto extension
+
+config CRYPTO_GHASH_RISCV64
+ tristate "Hash functions: GHASH"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_GCM
+ help
+ GCM GHASH function (NIST SP 800-38D)
+
+ Architecture: riscv64 using:
+ - Zvkg vector crypto extension
+
+config CRYPTO_SHA256_RISCV64
+ tristate "Hash functions: SHA-224 and SHA-256"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_SHA256
+ help
+ SHA-224 and SHA-256 secure hash algorithm (FIPS 180)
+
+ Architecture: riscv64 using:
+ - Zvknha or Zvknhb vector crypto extensions
+ - Zvkb vector crypto extension
+
+config CRYPTO_SHA512_RISCV64
+ tristate "Hash functions: SHA-384 and SHA-512"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_SHA512
+ help
+ SHA-384 and SHA-512 secure hash algorithm (FIPS 180)
+
+ Architecture: riscv64 using:
+ - Zvknhb vector crypto extension
+ - Zvkb vector crypto extension
+
+config CRYPTO_SM3_RISCV64
+ tristate "Hash functions: SM3 (ShangMi 3)"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_SM3
+ help
+ SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
+
+ Architecture: riscv64 using:
+ - Zvksh vector crypto extension
+ - Zvkb vector crypto extension
+
+config CRYPTO_SM4_RISCV64
+ tristate "Ciphers: SM4 (ShangMi 4)"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_ALGAPI
+ select CRYPTO_SM4
+ help
+ SM4 block cipher algorithm (OSCCA GB/T 32907-2016,
+ ISO/IEC 18033-3:2010/Amd 1:2021)
+
+ SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+ Organization of State Commercial Administration of China (OSCCA)
+ as an authorized cryptographic algorithm for use within China.
+
+ Architecture: riscv64 using:
+ - Zvksed vector crypto extension
+ - Zvkb vector crypto extension
+
+endmenu
diff --git a/arch/riscv/crypto/Makefile b/arch/riscv/crypto/Makefile
new file mode 100644
index 000000000000..247c7bc7288c
--- /dev/null
+++ b/arch/riscv/crypto/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_AES_RISCV64) += aes-riscv64.o
+aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \
+ aes-riscv64-zvkned-zvbb-zvkg.o aes-riscv64-zvkned-zvkb.o
+
+obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
+chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
+
+obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o
+ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o
+
+obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
+sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
+sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SM3_RISCV64) += sm3-riscv64.o
+sm3-riscv64-y := sm3-riscv64-glue.o sm3-riscv64-zvksh-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SM4_RISCV64) += sm4-riscv64.o
+sm4-riscv64-y := sm4-riscv64-glue.o sm4-riscv64-zvksed-zvkb.o
diff --git a/arch/riscv/crypto/aes-macros.S b/arch/riscv/crypto/aes-macros.S
new file mode 100644
index 000000000000..d1a258d04bc7
--- /dev/null
+++ b/arch/riscv/crypto/aes-macros.S
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains macros that are shared by the other aes-*.S files. The
+// generated code of these macros depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector AES block cipher extension ('Zvkned')
+
+// Loads the AES round keys from \keyp into vector registers and jumps to code
+// specific to the length of the key. Specifically:
+// - If AES-128, loads round keys into v1-v11 and jumps to \label128.
+// - If AES-192, loads round keys into v1-v13 and jumps to \label192.
+// - If AES-256, loads round keys into v1-v15 and continues onwards.
+//
+// Also sets vl=4 and vtype=e32,m1,ta,ma. Clobbers t0 and t1.
+.macro aes_begin keyp, label128, label192
+ lwu t0, 480(\keyp) // t0 = key length in bytes
+ li t1, 24 // t1 = key length for AES-192
+ vsetivli zero, 4, e32, m1, ta, ma
+ vle32.v v1, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v2, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v3, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v4, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v5, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v6, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v7, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v8, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v9, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v10, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v11, (\keyp)
+ blt t0, t1, \label128 // If AES-128, goto label128.
+ addi \keyp, \keyp, 16
+ vle32.v v12, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v13, (\keyp)
+ beq t0, t1, \label192 // If AES-192, goto label192.
+ // Else, it's AES-256.
+ addi \keyp, \keyp, 16
+ vle32.v v14, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v15, (\keyp)
+.endm
+
+// Encrypts \data using zvkned instructions, using the round keys loaded into
+// v1-v11 (for AES-128), v1-v13 (for AES-192), or v1-v15 (for AES-256). \keylen
+// is the AES key length in bits. vl and vtype must already be set
+// appropriately. Note that if vl > 4, multiple blocks are encrypted.
+.macro aes_encrypt data, keylen
+ vaesz.vs \data, v1
+ vaesem.vs \data, v2
+ vaesem.vs \data, v3
+ vaesem.vs \data, v4
+ vaesem.vs \data, v5
+ vaesem.vs \data, v6
+ vaesem.vs \data, v7
+ vaesem.vs \data, v8
+ vaesem.vs \data, v9
+ vaesem.vs \data, v10
+.if \keylen == 128
+ vaesef.vs \data, v11
+.elseif \keylen == 192
+ vaesem.vs \data, v11
+ vaesem.vs \data, v12
+ vaesef.vs \data, v13
+.else
+ vaesem.vs \data, v11
+ vaesem.vs \data, v12
+ vaesem.vs \data, v13
+ vaesem.vs \data, v14
+ vaesef.vs \data, v15
+.endif
+.endm
+
+// Same as aes_encrypt, but decrypts instead of encrypts.
+.macro aes_decrypt data, keylen
+.if \keylen == 128
+ vaesz.vs \data, v11
+.elseif \keylen == 192
+ vaesz.vs \data, v13
+ vaesdm.vs \data, v12
+ vaesdm.vs \data, v11
+.else
+ vaesz.vs \data, v15
+ vaesdm.vs \data, v14
+ vaesdm.vs \data, v13
+ vaesdm.vs \data, v12
+ vaesdm.vs \data, v11
+.endif
+ vaesdm.vs \data, v10
+ vaesdm.vs \data, v9
+ vaesdm.vs \data, v8
+ vaesdm.vs \data, v7
+ vaesdm.vs \data, v6
+ vaesdm.vs \data, v5
+ vaesdm.vs \data, v4
+ vaesdm.vs \data, v3
+ vaesdm.vs \data, v2
+ vaesdf.vs \data, v1
+.endm
+
+// Expands to aes_encrypt or aes_decrypt according to \enc, which is 1 or 0.
+.macro aes_crypt data, enc, keylen
+.if \enc
+ aes_encrypt \data, \keylen
+.else
+ aes_decrypt \data, \keylen
+.endif
+.endm
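
The "lwu t0, 480(\keyp)" at the top of aes_begin relies on the layout of the kernel's
struct crypto_aes_ctx: the encryption and decryption key schedules each occupy 240 bytes,
so the key_length field lands at byte offset 480, and the vle32.v loads that follow step
through the 16-byte round keys of key_enc starting at offset 0. The following is a minimal
sketch of that assumption, not part of the patch, written as build-time assertions against
the definitions in <crypto/aes.h>:

#include <crypto/aes.h>         /* struct crypto_aes_ctx, AES_MAX_KEYLENGTH_U32 */
#include <linux/build_bug.h>    /* static_assert() */
#include <linux/stddef.h>       /* offsetof() */

/*
 * struct crypto_aes_ctx is assumed to be laid out as:
 *
 *      u32 key_enc[AES_MAX_KEYLENGTH_U32];     // 60 words = 240 bytes, offset 0
 *      u32 key_dec[AES_MAX_KEYLENGTH_U32];     // 60 words = 240 bytes, offset 240
 *      u32 key_length;                         // offset 480
 *
 * aes_begin receives a pointer to the whole context, reads the key length
 * from offset 480, and loads the round keys from key_enc at offset 0.
 */
static_assert(offsetof(struct crypto_aes_ctx, key_enc) == 0);
static_assert(offsetof(struct crypto_aes_ctx, key_length) == 480);
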
diff --git a/arch/riscv/crypto/aes-riscv64-glue.c b/arch/riscv/crypto/aes-riscv64-glue.c
new file mode 100644
index 000000000000..37bc6ef0be40
--- /dev/null
+++ b/arch/riscv/crypto/aes-riscv64-glue.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES using the RISC-V vector crypto extensions. Includes the bare block
+ * cipher and the ECB, CBC, CTR, and XTS modes.
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+asmlinkage void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 in[AES_BLOCK_SIZE],
+ u8 out[AES_BLOCK_SIZE]);
+asmlinkage void aes_decrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 in[AES_BLOCK_SIZE],
+ u8 out[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_ecb_encrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len);
+asmlinkage void aes_ecb_decrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len);
+
+asmlinkage void aes_cbc_encrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 iv[AES_BLOCK_SIZE]);
+asmlinkage void aes_cbc_decrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 iv[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 iv[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_xts_encrypt_zvkned_zvbb_zvkg(
+ const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 tweak[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_xts_decrypt_zvkned_zvbb_zvkg(
+ const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 tweak[AES_BLOCK_SIZE]);
+
+static int riscv64_aes_setkey(struct crypto_aes_ctx *ctx,
+ const u8 *key, unsigned int keylen)
+{
+ /*
+ * For now we just use the generic key expansion, for these reasons:
+ *
+ * - zvkned's key expansion instructions don't support AES-192.
+ * So, non-zvkned fallback code would be needed anyway.
+ *
+ * - Users of AES in Linux usually don't change keys frequently.
+ * So, key expansion isn't performance-critical.
+ *
+ * - For single-block AES exposed as a "cipher" algorithm, it's
+ * necessary to use struct crypto_aes_ctx and initialize its 'key_dec'
+ * field with the round keys for the Equivalent Inverse Cipher. This
+ * is because with "cipher", decryption can be requested from a
+ * context where the vector unit isn't usable, necessitating a
+ * fallback to aes_decrypt(). But, zvkned can only generate and use
+ * the normal round keys. Of course, it's preferable to not have
+ * special code just for "cipher", as e.g. XTS also uses a
+ * single-block AES encryption. It's simplest to just use
+ * struct crypto_aes_ctx and aes_expandkey() everywhere.
+ */
+ return aes_expandkey(ctx, key, keylen);
+}
+
+static int riscv64_aes_setkey_cipher(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return riscv64_aes_setkey(ctx, key, keylen);
+}
+
+static int riscv64_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return riscv64_aes_setkey(ctx, key, keylen);
+}
+
+/* Bare AES, without a mode of operation */
+
+static void riscv64_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ aes_encrypt_zvkned(ctx, src, dst);
+ kernel_vector_end();
+ } else {
+ aes_encrypt(ctx, dst, src);
+ }
+}
+
+static void riscv64_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ aes_decrypt_zvkned(ctx, src, dst);
+ kernel_vector_end();
+ } else {
+ aes_decrypt(ctx, dst, src);
+ }
+}
+
+/* AES-ECB */
+
+static inline int riscv64_aes_ecb_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ kernel_vector_begin();
+ if (enc)
+ aes_ecb_encrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1));
+ else
+ aes_ecb_decrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1));
+ kernel_vector_end();
+ err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int riscv64_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_ecb_crypt(req, true);
+}
+
+static int riscv64_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_ecb_crypt(req, false);
+}
+
+/* AES-CBC */
+
+static inline int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ kernel_vector_begin();
+ if (enc)
+ aes_cbc_encrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1),
+ walk.iv);
+ else
+ aes_cbc_decrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1),
+ walk.iv);
+ kernel_vector_end();
+ err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int riscv64_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_cbc_crypt(req, true);
+}
+
+static int riscv64_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_cbc_crypt(req, false);
+}
+
+/* AES-CTR */
+
+static int riscv64_aes_ctr_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned int nbytes, p1_nbytes;
+ struct skcipher_walk walk;
+ u32 ctr32, nblocks;
+ int err;
+
+ /* Get the low 32-bit word of the 128-bit big endian counter. */
+ ctr32 = get_unaligned_be32(req->iv + 12);
+
+ err = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ if (nbytes < walk.total) {
+ /* Not the end yet, so keep the length block-aligned. */
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+ nblocks = nbytes / AES_BLOCK_SIZE;
+ } else {
+ /* It's the end, so include any final partial block. */
+ nblocks = DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE);
+ }
+ ctr32 += nblocks;
+
+ kernel_vector_begin();
+ if (ctr32 >= nblocks) {
+ /* The low 32-bit word of the counter won't overflow. */
+ aes_ctr32_crypt_zvkned_zvkb(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes,
+ req->iv);
+ } else {
+ /*
+ * The low 32-bit word of the counter will overflow.
+ * The assembly doesn't handle this case, so split the
+ * operation into two at the point where the overflow
+ * will occur. After the first part, add the carry bit.
+ */
+ p1_nbytes = min_t(unsigned int, nbytes,
+ (nblocks - ctr32) * AES_BLOCK_SIZE);
+ aes_ctr32_crypt_zvkned_zvkb(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ p1_nbytes, req->iv);
+ crypto_inc(req->iv, 12);
+
+ if (ctr32) {
+ aes_ctr32_crypt_zvkned_zvkb(
+ ctx,
+ walk.src.virt.addr + p1_nbytes,
+ walk.dst.virt.addr + p1_nbytes,
+ nbytes - p1_nbytes, req->iv);
+ }
+ }
+ kernel_vector_end();
+
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
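
The overflow handling above is easiest to follow with concrete numbers. Below is a small
standalone sketch, not kernel code, that mirrors the same arithmetic: given the low 32-bit
counter word before a walk step and the step's byte count, it reports where the step has to
be split so the carry into the upper 96 bits of the IV can be applied in between. The helper
name and the printf output are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/*
 * Mirrors the split in riscv64_aes_ctr_crypt(): 'ctr32' is the low 32-bit
 * word of the big-endian counter before this walk step, 'nbytes' is the
 * byte count of the step (only the final block may be partial).
 */
static void split_at_ctr32_wrap(uint32_t ctr32, size_t nbytes)
{
        uint32_t nblocks = (nbytes + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
        uint32_t after = ctr32 + nblocks;       /* wraps modulo 2^32 */

        if (after >= nblocks) {
                /* The low 32-bit word of the counter won't overflow. */
                printf("no wrap: all %zu bytes in one call\n", nbytes);
        } else {
                /*
                 * It will overflow: process the blocks up to the wrap point,
                 * propagate the carry into bytes 0..11 of the IV (as
                 * crypto_inc(req->iv, 12) does above), then process the rest.
                 */
                size_t p1_nbytes = (size_t)(0u - ctr32) * AES_BLOCK_SIZE;

                if (p1_nbytes > nbytes)
                        p1_nbytes = nbytes;
                printf("wrap: %zu bytes, carry, then %zu bytes\n",
                       p1_nbytes, nbytes - p1_nbytes);
        }
}

int main(void)
{
        split_at_ctr32_wrap(0x00000005, 4 * AES_BLOCK_SIZE);     /* no wrap    */
        split_at_ctr32_wrap(0xfffffffe, 4 * AES_BLOCK_SIZE);     /* 32 + 32    */
        split_at_ctr32_wrap(0xffffffff, 3 * AES_BLOCK_SIZE + 5); /* 16 + 37    */
        return 0;
}
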
+
+/* AES-XTS */
+
+struct riscv64_aes_xts_ctx {
+ struct crypto_aes_ctx ctx1;
+ struct crypto_aes_ctx ctx2;
+};
+
+static int riscv64_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct riscv64_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return xts_verify_key(tfm, key, keylen) ?:
+ riscv64_aes_setkey(&ctx->ctx1, key, keylen / 2) ?:
+ riscv64_aes_setkey(&ctx->ctx2, key + keylen / 2, keylen / 2);
+}
+
+static int riscv64_aes_xts_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct riscv64_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int tail = req->cryptlen % AES_BLOCK_SIZE;
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
+ struct scatterlist *src, *dst;
+ struct skcipher_walk walk;
+ int err;
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ /* Encrypt the IV with the tweak key to get the first tweak. */
+ kernel_vector_begin();
+ aes_encrypt_zvkned(&ctx->ctx2, req->iv, req->iv);
+ kernel_vector_end();
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ /*
+ * If the message length isn't divisible by the AES block size and the
+ * full message isn't available in one step of the scatterlist walk,
+ * then separate off the last full block and the partial block. This
+ * ensures that they are processed in the same call to the assembly
+ * function, which is required for ciphertext stealing.
+ */
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+ skcipher_walk_abort(&walk);
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ req->cryptlen - tail - AES_BLOCK_SIZE,
+ req->iv);
+ req = &subreq;
+ err = skcipher_walk_virt(&walk, req, false);
+ } else {
+ tail = 0;
+ }
+
+ while (walk.nbytes) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+
+ kernel_vector_begin();
+ if (enc)
+ aes_xts_encrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, req->iv);
+ else
+ aes_xts_decrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, req->iv);
+ kernel_vector_end();
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ if (err || likely(!tail))
+ return err;
+
+ /* Do ciphertext stealing with the last full block and partial block. */
+
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
+
+ kernel_vector_begin();
+ if (enc)
+ aes_xts_encrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, walk.nbytes, req->iv);
+ else
+ aes_xts_decrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, walk.nbytes, req->iv);
+ kernel_vector_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
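
For reference, the way a request is carved up when ciphertext stealing is needed: everything
up to, but not including, the last full block is handled in block-aligned passes, and the last
full block plus the partial tail go to the assembly together. A standalone sketch of that
split, not part of the patch, with an illustrative helper name:

#include <stddef.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/*
 * Shows how riscv64_aes_xts_crypt() splits a message of 'cryptlen' bytes
 * when the tail isn't block-aligned and the scatterlist walk can't cover
 * the whole message in one step: a block-aligned main part first, then one
 * call covering "last full block + partial block" so the assembly can do
 * the ciphertext stealing swap between them.
 */
static void xts_split(size_t cryptlen)
{
        size_t tail = cryptlen % AES_BLOCK_SIZE;

        if (cryptlen < AES_BLOCK_SIZE)
                printf("%3zu bytes: rejected (-EINVAL)\n", cryptlen);
        else if (tail == 0)
                printf("%3zu bytes: aligned, no stealing needed\n", cryptlen);
        else
                printf("%3zu bytes: %zu-byte aligned pass + %zu-byte stealing pass\n",
                       cryptlen, cryptlen - tail - AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE + tail);
}

int main(void)
{
        xts_split(5 * AES_BLOCK_SIZE);          /* 80: no stealing        */
        xts_split(4 * AES_BLOCK_SIZE + 5);      /* 69: 48 + 21            */
        xts_split(AES_BLOCK_SIZE + 1);          /* 17:  0 + 17            */
        xts_split(10);                          /* too short for XTS      */
        return 0;
}
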
+
+static int riscv64_aes_xts_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_xts_crypt(req, true);
+}
+
+static int riscv64_aes_xts_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_xts_crypt(req, false);
+}
+
+/* Algorithm definitions */
+
+static struct crypto_alg riscv64_zvkned_aes_cipher_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "aes",
+ .cra_driver_name = "aes-riscv64-zvkned",
+ .cra_cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = riscv64_aes_setkey_cipher,
+ .cia_encrypt = riscv64_aes_encrypt,
+ .cia_decrypt = riscv64_aes_decrypt,
+ },
+ .cra_module = THIS_MODULE,
+};
+
+static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = {
+ {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_ecb_encrypt,
+ .decrypt = riscv64_aes_ecb_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .walksize = 8 * AES_BLOCK_SIZE, /* matches LMUL=8 */
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-riscv64-zvkned",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_cbc_encrypt,
+ .decrypt = riscv64_aes_cbc_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-riscv64-zvkned",
+ .cra_module = THIS_MODULE,
+ },
+ }
+};
+
+static struct skcipher_alg riscv64_zvkned_zvkb_aes_skcipher_alg = {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_ctr_crypt,
+ .decrypt = riscv64_aes_ctr_crypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
+ .base = {
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-riscv64-zvkned-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct skcipher_alg riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg = {
+ .setkey = riscv64_aes_xts_setkey,
+ .encrypt = riscv64_aes_xts_encrypt,
+ .decrypt = riscv64_aes_xts_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct riscv64_aes_xts_ctx),
+ .cra_priority = 300,
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-riscv64-zvkned-zvbb-zvkg",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static inline bool riscv64_aes_xts_supported(void)
+{
+ return riscv_isa_extension_available(NULL, ZVBB) &&
+ riscv_isa_extension_available(NULL, ZVKG) &&
+ riscv_vector_vlen() < 2048 /* Implementation limitation */;
+}
+
+static int __init riscv64_aes_mod_init(void)
+{
+ int err = -ENODEV;
+
+ if (riscv_isa_extension_available(NULL, ZVKNED) &&
+ riscv_vector_vlen() >= 128) {
+ err = crypto_register_alg(&riscv64_zvkned_aes_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(
+ riscv64_zvkned_aes_skcipher_algs,
+ ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
+ if (err)
+ goto unregister_zvkned_cipher_alg;
+
+ if (riscv_isa_extension_available(NULL, ZVKB)) {
+ err = crypto_register_skcipher(
+ &riscv64_zvkned_zvkb_aes_skcipher_alg);
+ if (err)
+ goto unregister_zvkned_skcipher_algs;
+ }
+
+ if (riscv64_aes_xts_supported()) {
+ err = crypto_register_skcipher(
+ &riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg);
+ if (err)
+ goto unregister_zvkned_zvkb_skcipher_alg;
+ }
+ }
+
+ return err;
+
+unregister_zvkned_zvkb_skcipher_alg:
+ if (riscv_isa_extension_available(NULL, ZVKB))
+ crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
+unregister_zvkned_skcipher_algs:
+ crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
+ ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
+unregister_zvkned_cipher_alg:
+ crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
+ return err;
+}
+
+static void __exit riscv64_aes_mod_exit(void)
+{
+ if (riscv64_aes_xts_supported())
+ crypto_unregister_skcipher(&riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg);
+ if (riscv_isa_extension_available(NULL, ZVKB))
+ crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
+ crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
+ ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
+ crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
+}
+
+module_init(riscv64_aes_mod_init);
+module_exit(riscv64_aes_mod_exit);
+
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS (RISC-V accelerated)");
+MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("ecb(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("ctr(aes)");
+MODULE_ALIAS_CRYPTO("xts(aes)");
diff --git a/arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S b/arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S
new file mode 100644
index 000000000000..146fc9cfb268
--- /dev/null
+++ b/arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// m