// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-core.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
 *
 * Core file which registers crypto algorithms supported by the SecuritySystem
 *
 * You can find a link to the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "sun8i-ss.h"
static const struct ss_variant ss_a80_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
.alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
},
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
{ "bus", 0, 300 * 1000 * 1000 },
{ "mod", 0, 300 * 1000 * 1000 },
}
};
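
/*
 * The A83T variant additionally supports the MD5, SHA1, SHA224 and SHA256
 * hash algorithms.
 */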
static const struct ss_variant ss_a83t_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
.alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256,
},
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
{ "bus", 0, 300 * 1000 * 1000 },
{ "mod", 0, 300 * 1000 * 1000 },
}
};

/*
 * sun8i_ss_get_engine_number() - get the next channel slot
 * This is a simple round-robin way of getting the next channel.
 */
int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss)
{
return atomic_inc_return(&ss->flow) % MAXFLOW;
}
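
/*
 * sun8i_ss_run_task() - run one cipher request on its assigned flow
 * Build the control word, then program key, IV, source, destination and
 * length for each scatter/gather chunk and wait for the flow's completion
 * before moving on to the next chunk.
 */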
int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx,
const char *name)
{
int flow = rctx->flow;
unsigned int ivlen = rctx->ivlen;
u32 v = SS_START;
int i;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
ss->flows[flow].stat_req++;
#endif
/* choose between stream0/stream1 */
if (flow)
v |= SS_FLOW1;
else
v |= SS_FLOW0;
v |= rctx->op_mode;
v |= rctx->method;
if (rctx->op_dir)
v |= SS_DECRYPTION;
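
	/* Encode the AES key size into the control word */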
switch (rctx->keylen) {
case 128 / 8:
v |= SS_AES_128BITS << 7;
break;
case 192 / 8:
v |= SS_AES_192BITS << 7;
break;
case 256 / 8:
v |= SS_AES_256BITS << 7;
break;
}
for (i = 0; i < MAX_SG; i++) {
if (!rctx->t_dst[i].addr)
break;
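
		/*
		 * The key/IV/DMA/length/control registers are shared by both
		 * flows (the target flow is selected via the control word),
		 * so their programming is serialized with ss->mlock.
		 */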
mutex_lock(&ss->mlock);
writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
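
		/*
		 * IV selection: for encryption the first chunk uses the
		 * request IV, while the following chunks chain from the last
		 * ciphertext block of the previous destination chunk (lengths
		 * are counted in 32-bit words, hence the * 4). Decryption
		 * uses the per-chunk IVs prepared in p_iv[].
		 */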
if (ivlen) {
if (rctx->op_dir == SS_ENCRYPTION) {
if (i == 0)
writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
else
writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
} else {
writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
}
}
dev_dbg(ss->dev,
"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n",
i, flow, name, v,
rctx->t_src[i].len, rctx->t_dst[i].len,
rctx->method, rctx->op_mode,
rctx->op_dir, rctx->t_src[i].len);
writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
reinit_completion(&ss->flows[flow].complete);
ss->flows[flow].status = 0;
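		/*
		 * Make sure the descriptor registers and the status reset are
		 * written before the engine is started via SS_CTL_REG.
		 */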
wmb();
writel(v, ss->base + SS_CTL_REG);
mutex_unlock(&ss->mlock);
wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
msecs_to_jiffies(2000));
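		/*
		 * The interrupt handler sets status to 1 on completion; if it
		 * is still 0 here the request is treated as a DMA timeout.
		 */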
if (ss->flows[flow].status == 0) {
dev_err(ss->dev, "DMA timeout for %s\n", name);
return -EFAULT;
}
}
return 0;
}
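
/*
 * ss_irq_handler() - completion interrupt handler
 * Each flow has its own bit in SS_INT_STA_REG: acknowledge every pending bit
 * by writing it back, then complete the corresponding flow.
 */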
static irqreturn_t ss_irq_handler(int irq, void *data)
{
struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data;
int flow = 0;
u32 p;
p = readl(ss->base + SS_INT_STA_REG);
for (flow = 0; flow < MAXFLOW; flow++) {
if (p & (BIT(flow))) {
writel(BIT(flow), ss->base + SS_INT_STA_REG);
ss->flows[flow].status = 1;
complete(&ss->flows[flow].complete);
}
}
return IRQ_HANDLED;
}
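
/* Algorithm templates registered with the crypto API by this driver */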
static struct sun8i_ss_alg_template ss_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_CBC,
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ss",
.cra_priority = 400