// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Hannes Reinecke, SUSE Linux
*/
#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;
/*
 * Per-queue state for one DH-HMAC-CHAP authentication transaction.
 * One context exists per queue being authenticated; it is driven from
 * auth_work and carries everything needed across the negotiate /
 * challenge / reply / success message exchange.
 */
struct nvme_dhchap_queue_context {
struct list_head entry;
struct work_struct auth_work;
struct nvme_ctrl *ctrl;
struct crypto_shash *shash_tfm;	/* hash transform for the negotiated hash_id */
struct crypto_kpp *dh_tfm;	/* DH key-exchange transform (FFDHE groups) */
struct nvme_dhchap_key *transformed_key;
void *buf;	/* CHAP_BUF_SIZE message buffer from nvme_chap_buf_pool */
int qid;	/* queue being authenticated; 0 == admin queue */
int error;	/* final result of the authentication attempt */
u32 s1;		/* host sequence number */
u32 s2;		/* controller sequence number (bidirectional auth) */
bool bi_directional;	/* controller requested authentication of itself too */
u16 transaction;	/* transaction ID echoed in every message */
u8 status;	/* NVME_AUTH_DHCHAP_FAILURE_* code to report on error */
u8 dhgroup_id;	/* negotiated DH group */
u8 hash_id;	/* negotiated hash algorithm */
size_t hash_len;	/* digest length for hash_id */
u8 c1[64];	/* controller challenge (sized for the largest digest) */
u8 c2[64];	/* host challenge for bidirectional auth */
u8 response[64];	/* computed challenge response */
u8 *ctrl_key;	/* controller's DH public key */
u8 *host_key;	/* host's DH public key */
u8 *sess_key;	/* derived DH shared session key */
int ctrl_key_len;
int host_key_len;
int sess_key_len;
};
static struct workqueue_struct *nvme_auth_wq;
/*
 * Upper bound on concurrently authenticating queues for this controller:
 * the admin queue plus every possible I/O, write, and poll queue.
 */
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
	return 1 + ctrl->opts->nr_io_queues +
		ctrl->opts->nr_write_queues +
		ctrl->opts->nr_poll_queues;
}
/*
 * Send (or fetch) one DH-HMAC-CHAP protocol message on @qid via the
 * fabrics Auth Send / Auth Receive commands.
 *
 * Returns 0 on success, a positive NVMe status code, or a negative errno.
 */
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
		void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
	struct request_queue *q;
	int ret;

	/*
	 * Admin-queue messages go through the fabrics queue; I/O-queue
	 * messages use the connect queue with a reserved tag and must
	 * not block waiting for a tag.
	 */
	if (qid == 0) {
		q = ctrl->fabrics_q;
	} else {
		q = ctrl->connect_q;
		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
	}

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
"qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_err(ctrl->device,
"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}
/*
 * Validate a message just received from the controller against the
 * message type we expect next and our transaction ID.
 *
 * Returns 0 when the message is the expected one, the controller's
 * reason code when it sent an AUTH_failure1, or a local
 * NVME_AUTH_DHCHAP_FAILURE_* code for a malformed/mismatched message.
 */
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	u16 t_id = le16_to_cpu(data->t_id);

	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	/* Controller aborted: propagate its reason code verbatim. */
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1)
		return data->rescode_exp;

	/* Anything other than the expected DH-CHAP message is a protocol error. */
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}

	/* The transaction ID must match the one we opened with. */
	if (t_id != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, t_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}

	return 0;
}
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return -EINVAL;
}
memset((u8 *)chap->buf, 0, size);
data->auth_type = NVME_AUTH_COMMON_MESSAGES;
data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
data->t_id = cpu_to_le16(chap->transaction);
data->sc_c = 0; /* No secure channel concatenation */
data->napd = 1;
data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
data->auth_protocol[0].dhchap.halen = 3;
data->auth_protocol[0].dhchap.dhlen = 6;
data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
data->auth_protocol[0