// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include "mana.h"
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
return readq(g->bar0_va + offset);
}
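
/* In PF mode, read the doorbell page size and base from the PF register
 * space, then locate the shared memory base via the SR-IOV config region.
 */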
static void mana_gd_init_pf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
void __iomem *sriov_base_va;
u64 sriov_base_off;

	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
gc->shm_base = sriov_base_va +
mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
}
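
/* In VF mode, the doorbell page and shared memory locations come straight
 * from the VF register space in BAR0.
 */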
static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);

	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}

static void mana_gd_init_registers(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);

	if (gc->is_pf)
mana_gd_init_pf_regs(pdev);
else
mana_gd_init_vf_regs(pdev);
}
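
/* Query the device's resource limits, clamp the usable MSI-X count to
 * what the device supports, and size max_num_queues to the smallest of
 * the online CPU count, the driver limit, and the device's EQ/CQ/SQ/RQ
 * limits.
 */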
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_query_max_resources_resp resp = {};
struct gdma_general_req req = {};
int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
sizeof(req), sizeof(resp));
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
err, resp.hdr.status);
return err ? err : -EPROTO;
}
if (gc->num_msix_usable > resp.max_msix)
gc->num_msix_usable = resp.max_msix;
if (gc->num_msix_usable <= 1)
return -ENOSPC;
gc->max_num_queues = num_online_cpus();
if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
gc->max_num_queues = MANA_MAX_NUM_QUEUES;
if (gc->max_num_queues > resp.max_eq)
gc->max_num_queues = resp.max_eq;
if (gc->max_num_queues > resp.max_cq)
gc->max_num_queues = resp.max_cq;
if (gc->max_num_queues > resp.max_sq)
gc->max_num_queues = resp.max_sq;
if (gc->max_num_queues > resp.max_rq)
gc->max_num_queues = resp.max_rq;

	/* The Hardware Channel (HWC) uses 1 MSI-X interrupt. */
if (gc->max_num_queues > gc->num_msix_usable - 1)
gc->max_num_queues = gc->num_msix_usable - 1;
return 0;
}
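
/* List the devices exposed over GDMA and record the MANA network
 * device ID. The HWC device is set up earlier and skipped here.
 */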
static int mana_gd_detect_devices(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
u32 i, max_num_devs;
u16 dev_type;
int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
resp.hdr.status);
return err ? err : -EPROTO;
}
max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
for (i = 0; i < max_num_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
if (dev_type == GDMA_DEVICE_MANA) {
gc->mana.gdma_context = gc;
gc->mana.dev_id = dev;
}
}
return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
}
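
/* Send a request to the device over the Hardware Channel (HWC) and
 * collect its response.
 */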
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp)
{
struct hw_channel_context *hwc = gc->hwc.driver_data;

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
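
/* Allocate DMA-coherent memory for a hardware queue. The length must be
 * a power of two, no smaller than PAGE_SIZE.
 */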
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi)
{
dma_addr_t dma_handle;
void *buf;

	if (length < PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
gmi->dev = gc->dev;
buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
if (!buf)
return -ENOMEM;
gmi->dma_handle = dma_handle;
gmi->virt_addr = buf;
gmi->length = length;
return 0;
}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{
dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
gmi->dma_handle);
}
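
/* Ask the device to create a hardware EQ backed by the queue's DMA
 * region. On success the device owns the region, so drop the local
 * handle and mark the EQ as needing a disable on teardown.
 */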
static int mana_gd_create_hw_eq(struct gdma_context *gc,
struct gdma_queue *queue)
{
struct gdma_create_queue_resp resp = {};
struct gdma_create_queue_req req = {};
int err;

	if (queue->type != GDMA_EQ)
return -EINVAL;
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
sizeof(req), sizeof(resp));
req.hdr.dev_id = queue->gdma_dev->dev_id;
req.type = queue->type;
req.pdid = queue->gdma_dev->pdid;
req.doolbell_id = queue->gdma_dev->doorbell;
req.gdma_region = queue->mem_info.gdma_region;
req.queue_size = queue->queue_size;
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
req.eq_pci_msix_index = queue->eq.msix_index;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
resp.hdr.status);
return err ? err : -EPROTO;
}
queue->id = resp.queue_index;
queue->eq.disable_needed = true;
queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
return 0;
}
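
/* Ask the device to disable a previously created hardware EQ. */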
static int mana_gd_disable_queue(struct gdma_queue *queue)
{
struct gdma_context *gc = queue->gdma_dev->gdma_context;
struct gdma_disable_queue_req req = {};
struct gdma_general_resp resp = {};
int err;

	WARN_ON(queue->type != GDMA_EQ);
mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
sizeof(req), sizeof(resp));
req.hdr.dev_id = queue->gdma_dev->dev_id;
req.type = queue->type;
req.queue_index = queue->id;
req.alloc_res_id_on_creation = 1;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
resp.hdr.status);
return err ? err : -EPROTO;
}
return 0;
}
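
/* Offsets of the per-queue-type doorbell registers within a doorbell page. */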
#define DOORBELL_OFFSET_SQ 0x0
#define DOORBELL_OFFSET_RQ 0x400
#define DOORBELL_OFFSET_CQ 0x800
#define DOORBELL_OFFSET_EQ 0xFF8
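
/* Compose a doorbell entry for the given queue type and write it to the
 * queue's doorbell register in the selected doorbell page.
 */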
static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
enum gdma_queue_type q_type, u32 qid,
u32 tail_ptr, u8 num_req)
{
void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
union gdma_doorbell_entry e = {};

	switch (q_type) {
case GDMA_EQ:
e.eq.id = qid;
e.eq.tail_ptr = tail_ptr;
e.eq.arm = num_req;
addr += DOORBELL_OFFSET_EQ;
break;
case GDMA_CQ:
e.cq.id = qid;
e.cq.tail_ptr = tail_ptr;
e.cq.arm = num_req;
addr += DOORBELL_OFFSET_CQ;
break;
case GDMA_RQ:
e.rq.id = qid;
e.rq.tail_ptr = tail_ptr;
e.rq.wqe_cnt = num_req;
addr += DOORBELL_OFFSET_RQ;
break;
case GDMA_SQ:
e.sq.id = qid;
e.sq.tail_ptr = tail_ptr;
addr += DOORBELL_OFFSET_SQ;
break;
default:
WARN_ON(1);
return;
}

	/* Ensure all writes are done before ringing the doorbell. */
wmb();
writeq(e.as_uint64, addr);
}
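
/* Notify the device of newly posted WQEs by ringing the work queue's
 * doorbell with the current head, expressed in basic units.
 */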
void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}
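
/* Update the CQ head via its doorbell and, if arm_bit is set, re-arm the
 * CQ so the next completion raises an interrupt.
 */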
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
struct gdma_context *gc = cq->gdma_dev->gdma_context;
	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
			      head, arm_bit);
}