// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"
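
/*
 * Tasklet handler for virtual queue (VQ) completions. The tasklet data
 * is the per-VF wqe info; completed requests are post-processed in
 * softirq context via otx_cpt_post_process(). Every queue's tasklet is
 * handed the same wqe info and always services vq_wqe[0].
 */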
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}
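
/*
 * Allocate the per-VF wqe info and set up one completion tasklet per
 * virtual queue. Undone by cleanup_worker_threads().
 */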
static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}
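
/*
 * Kill all VQ tasklets and release the wqe info allocated by
 * init_worker_threads().
 */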
static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}
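
/*
 * Free the entry array of every pending queue and reset the queue
 * bookkeeping. kfree_sensitive() zeroes the memory first, so any
 * request state left in the entries does not leak.
 */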
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive(queue->head);
		/* Mark freed so a repeated call skips this queue */
		queue->head = NULL;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}
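
/*
 * Allocate a qlen-entry array for each of num_queues pending queues
 * and initialize its indices and lock. On failure, everything
 * allocated so far is torn down again.
 */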
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}
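
/* Set up the pending queues used to track in-flight requests. */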
static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}

	return 0;
}
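
/* Tear down the pending queues, if any were set up. */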
static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}
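
/*
 * Walk each command queue's chunk list and release every DMA-coherent
 * command chunk along with its list node.
 */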
static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&queue->chead)) {
			chunk = list_first_entry(&queue->chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}

		queue->num_chunks = 0;
		queue->idx = 0;
	}
}
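
/*
 * Allocate the command queues. Each queue is built from one or more
 * DMA-coherent chunks (see free_command_queues() for the teardown).
 */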
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;