Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/virtio_blk.c               |  24
-rw-r--r--  drivers/net/virtio_net.c                 | 325
-rw-r--r--  drivers/nvdimm/virtio_pmem.c             |   9
-rw-r--r--  drivers/platform/mellanox/mlxbf-tmfifo.c |   3
-rw-r--r--  drivers/remoteproc/remoteproc_core.c     |   4
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c   |  13
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c         |   4
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c          |  14
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h          |   2
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c          | 144
-rw-r--r--  drivers/vdpa/mlx5/core/mlx5_vdpa.h       |  11
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c        | 173
-rw-r--r--  drivers/vdpa/vdpa.c                      |  14
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c         |  18
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.h         |   1
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_blk.c     | 176
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_net.c     |   3
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.c     | 102
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.h     |   8
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c       | 180
-rw-r--r--  drivers/vhost/scsi.c                     |  85
-rw-r--r--  drivers/vhost/vdpa.c                     |  38
-rw-r--r--  drivers/vhost/vringh.c                   |  78
-rw-r--r--  drivers/virtio/Kconfig                   |  11
-rw-r--r--  drivers/virtio/virtio.c                  |   4
-rw-r--r--  drivers/virtio/virtio_mmio.c             |  14
-rw-r--r--  drivers/virtio/virtio_pci_common.c       |  32
-rw-r--r--  drivers/virtio/virtio_pci_common.h       |   3
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c       |   8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c       | 153
-rw-r--r--  drivers/virtio/virtio_pci_modern_dev.c   |  39
-rw-r--r--  drivers/virtio/virtio_ring.c             | 770
-rw-r--r--  drivers/virtio/virtio_vdpa.c             |  18
33 files changed, 1971 insertions, 510 deletions
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d7d72e8f6e55..30255fcaf181 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -101,6 +101,14 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
+static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+
+ return vq;
+}
+
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
@@ -416,7 +424,7 @@ static void virtio_queue_rqs(struct request **rqlist)
struct request *requeue_list = NULL;
rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
bool kick;
if (!virtblk_prep_rq_batch(req)) {
@@ -837,7 +845,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
- struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
@@ -862,22 +870,10 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return found;
}
-static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
-{
- struct virtio_blk *vblk = data;
- struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
-
- WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
- hctx->driver_data = vq;
- return 0;
-}
-
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
- .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
.poll = virtblk_poll,
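
[The virtio_blk hunk above can drop .init_hctx because the per-queue state is derivable from hctx->queue_num at any time. A minimal sketch of that pattern for a hypothetical driver — my_dev, my_hw_queue, and the field names are illustrative, not from the patch:]

	struct my_hw_queue *my_get_hw_queue(struct blk_mq_hw_ctx *hctx)
	{
		/* queuedata was set when the request queue was created */
		struct my_dev *mdev = hctx->queue->queuedata;

		/* blk-mq numbers hardware contexts 0..nr_hw_queues-1, so
		 * queue_num indexes the per-queue array directly; caching
		 * the pointer in hctx->driver_data adds nothing. */
		return &mdev->queues[hctx->queue_num];
	}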
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3b3eebad3977..d934774e9733 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -267,6 +270,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -284,6 +293,9 @@ struct padded_vnet_hdr {
char padding[12];
};
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -1628,6 +1640,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1875,6 +1892,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+ /* 1. wait for all in-flight xmit to complete
+ * 2. close the race between netif_stop_subqueue() and netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -2285,10 +2366,57 @@ static void virtnet_get_ringparam(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+}
+
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
@@ -2618,27 +2746,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight if the link is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2646,16 +2836,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2774,10 +2967,12 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -3171,6 +3366,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -3178,26 +3394,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -3228,6 +3432,29 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
(unsigned int)GOOD_PACKET_LEN);
}
+static void virtnet_config_sizes(struct virtnet_info *vi, u32 *sizes)
+{
+ u32 i, rx_size, tx_size;
+
+ if (vi->speed == SPEED_UNKNOWN || vi->speed < SPEED_10000) {
+ rx_size = 1024;
+ tx_size = 1024;
+
+ } else if (vi->speed < SPEED_40000) {
+ rx_size = 1024 * 4;
+ tx_size = 1024 * 4;
+
+ } else {
+ rx_size = 1024 * 8;
+ tx_size = 1024 * 8;
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ sizes[rxq2vq(i)] = rx_size;
+ sizes[txq2vq(i)] = tx_size;
+ }
+}
+
static int virtnet_find_vqs(struct virtnet_info *vi)
{
vq_callback_t **callbacks;
@@ -3235,6 +3462,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
+ u32 *sizes;
bool *ctx;
/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
* possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
* possible control vq.
*/
@@ -3262,10 +3490,15 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx = NULL;
}
+ sizes = kmalloc_array(total_vqs, sizeof(*sizes), GFP_KERNEL);
+ if (!sizes)
+ goto err_sizes;
+
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
callbacks[total_vqs - 1] = NULL;
names[total_vqs - 1] = "control";
+ sizes[total_vqs - 1] = 64;
}
/* Allocate/initialize parameters for send/receive virtqueues */
@@ -3280,8 +3513,10 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx[rxq2vq(i)] = true;
}
- ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
- names, ctx, NULL);
+ virtnet_config_sizes(vi, sizes);
+
+ ret = virtio_find_vqs_ctx_size(vi->vdev, total_vqs, vqs, callbacks,
+ names, sizes, ctx, NULL);
if (ret)
goto err_find;
@@ -3301,6 +3536,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
err_find:
+ kfree(sizes);
+err_sizes:
kfree(ctx);
err_ctx:
kfree(names);
@@ -3444,6 +3681,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3580,6 +3819,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -3651,6 +3897,9 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->curr_queue_pairs = num_online_cpus();
vi->max_queue_pairs = max_queue_pairs;
+ virtnet_init_settings(dev);
+ virtnet_update_settings(vi);
+
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
@@ -3663,8 +3912,6 @@ static int virtnet_probe(struct virtio_device *vdev)
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
- virtnet_init_settings(dev);
-
if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
vi->failover = net_failover_create(vi->dev);
if (IS_ERR(vi->failover)) {
@@ -3814,7 +4061,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
- VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
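
[virtnet_rx_resize()/virtnet_tx_resize() above rely on the virtqueue_resize() API added elsewhere in this series: it detaches every buffer the device still owns, hands each one to a driver-supplied recycle callback, and then reallocates the ring. A minimal sketch of the calling contract, assuming an already-quiesced queue — my_recycle/my_resize are illustrative names:]

	static void my_recycle(struct virtqueue *vq, void *buf)
	{
		/* Invoked once per buffer still queued at resize time;
		 * the driver must free or account for each one, as the
		 * virtnet_{rq,sq}_free_unused_buf() helpers above do. */
		kfree(buf);
	}

	static int my_resize(struct virtqueue *vq, u32 num)
	{
		/* The caller must quiesce the queue first (NAPI disabled,
		 * tx subqueue stopped), mirroring the virtio_net code. */
		return virtqueue_resize(vq, num, my_recycle);
	}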
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 995b6cdc67ed..20da455d2ef6 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -81,17 +81,24 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
ndr_desc.res = &res;
ndr_desc.numa_node = nid;
ndr_desc.flush = async_pmem_flush;
+ ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ /*
+ * The NVDIMM region could be in use (e.g. its flush path
+ * kicking the vq) before virtio_dev_probe() calls
+ * virtio_device_ready(), so mark the device ready here.
+ */
+ virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
- nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
out_nd:
+ virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
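
[The virtio_pmem hunk applies a general virtio probe rule: if probe() exposes a path that can kick a virtqueue before probe() returns (here, the nvdimm region's async flush), virtio_device_ready() must be called before that exposure, and the error path must reset the device. A condensed sketch of that shape — my_probe and the setup helper are illustrative:]

	static int my_probe(struct virtio_device *vdev)
	{
		int err;

		/* Set DRIVER_OK ourselves; virtio_dev_probe() would only
		 * do it after probe() returns, which is too late here. */
		virtio_device_ready(vdev);

		err = my_register_consumer_that_may_kick(vdev);
		if (err) {
			/* Reset so no further vq use happens, then tear down. */
			virtio_reset_device(vdev);
			vdev->config->del_vqs(vdev);
			return err;
		}
		return 0;
	}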
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 38800e86ed8a..8be13d416f48 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -928,6 +928,7 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
+ u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -959,6 +960,8 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
goto error;
}
+ vq->num_max = vring->num;
+
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 89832399e028..e5279ed9a8d7 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -335,7 +335,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
size_t size;
/* actual size of vring (in bytes) */
- size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
@@ -402,7 +402,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return -EINVAL;
}
- rvring->len = vring->num;
+ rvring->num = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 70ab496d0431..81c4f5776109 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -87,7 +87,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
- int len, size;
+ int num, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
@@ -104,20 +104,20 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rvring = &rvdev->vring[id];
addr = mem->va;
- len = rvring->len;
+ num = rvring->num;
/* zero vring */
- size = vring_size(len, rvring->align);
+ size = vring_size(num, rvring->align);
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+ id, addr, num, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
+ vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
@@ -125,6 +125,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
return ERR_PTR(-ENOMEM);
}
+ vq->num_max = num;
+
rvring->vq = vq;
vq->priv = rvring;
@@ -156,6 +158,7 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
+ u32 sizes[],
const bool * ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index aa96f67dd0b1..896896e32664 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -532,6 +532,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
+
+ vq->num_max = info->num;
+
/* it may have been reduced */
info->num = virtqueue_get_vring_size(vq);
@@ -634,6 +637,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
+ u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
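
[The mlxbf-tmfifo, remoteproc, and virtio_ccw hunks above all implement the same transport-side contract this series introduces: find_vqs() grows a sizes[] argument (a per-vq ring-size hint that a transport may clamp or ignore), and each created virtqueue must publish its maximum ring size in vq->num_max so that the core and ethtool can validate resize requests. A sketch of the per-vq part — everything except the vq fields is illustrative:]

	static struct virtqueue *my_setup_vq(struct virtio_device *vdev,
					     unsigned int index, u32 size_hint)
	{
		struct virtqueue *vq;
		/* Transport policy: clamp the hint to the hardware limit. */
		u32 num = min_t(u32, size_hint, MY_HW_RING_LIMIT);

		vq = my_create_vring(vdev, index, num);
		if (!vq)
			return NULL;

		vq->num_max = num;	/* required by the new contract */
		return vq;
	}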
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..75a703b803a2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -29,7 +29,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
vp_iowrite16(vector, &cfg->msix_config);
return vp_ioread16(&cfg->msix_config);
@@ -128,6 +127,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cap_addr(hw, &cap);
+ hw->cap_dev_config_size = le32_to_cpu(cap.length);
IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -233,15 +233,23 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
struct ifcvf_adapter *adapter;
+ u32 net_config_size = sizeof(struct virtio_net_config);
+ u32 blk_config_size = sizeof(struct virtio_blk_config);
+ u32 cap_size = hw->cap_dev_config_size;
u32 config_size;
adapter = vf_to_adapter(hw);
+ /* If the on-board device config space is larger than
+ * struct virtio_net/blk_config, return only the size that
+ * the spec-defined structure covers. This is very unlikely
+ * in practice; purely defensive programming.
+ */
switch (hw->dev_type) {
case VIRTIO_ID_NET:
- config_size = sizeof(struct virtio_net_config);
+ config_size = min(cap_size, net_config_size);
break;
case VIRTIO_ID_BLOCK:
- config_size = sizeof(struct virtio_blk_config);
+ config_size = min(cap_size, blk_config_size);
break;
default:
config_size = 0;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..f5563f665cc6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -87,6 +87,8 @@ struct ifcvf_hw {
int config_irq;
int vqs_reused_irq;
u16 nr_vring;
+ /* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ u32 cap_dev_config_size;
};
struct ifcvf_adapter {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 0a5670729412..f9c0044c6442 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -685,7 +685,7 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
}
/*
- * IFCVF currently does't have on-chip IOMMU, so not
+ * IFCVF currently doesn't have on-chip IOMMU, so not
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
@@ -752,59 +752,36 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
+ struct vdpa_device *vdpa_dev;
struct pci_dev *pdev;
struct ifcvf_hw *vf;
- struct device *dev;
- int ret, i;
+ int ret;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
- if (ifcvf_mgmt_dev->adapter)
+ if (!ifcvf_mgmt_dev->adapter)
return -EOPNOTSUPP;
- pdev = ifcvf_mgmt_dev->pdev;
- dev = &pdev->dev;
- adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, 1, 1, name, false);
- if (IS_ERR(adapter)) {
- IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return PTR_ERR(adapter);
- }
-
- ifcvf_mgmt_dev->adapter = adapter;
-
+ adapter = ifcvf_mgmt_dev->adapter;
vf = &adapter->vf;
- vf->dev_type = get_dev_type(pdev);
- vf->base = pcim_iomap_table(pdev);
+ pdev = adapter->pdev;
+ vdpa_dev = &adapter->vdpa;
- adapter->pdev = pdev;
- adapter->vdpa.dma_dev = &pdev->dev;
-
- ret = ifcvf_init_hw(vf, pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- goto err;
- }
-
- fo