author     Daniel Borkmann <daniel@iogearbox.net>    2020-11-14 02:29:00 +0100
committer  Daniel Borkmann <daniel@iogearbox.net>    2020-11-14 02:30:03 +0100
commit     c14d61fca0d10498bf267c0ab1f381dd0b35d96b (patch)
tree       710de0411e07d3ebe0233c8eeb5f80245cf3b63b /include
parent     6f100640ca5b2a2ff67b001c9fd3de21f7b12cf2 (diff)
parent     b87c57ae12dbecd50471b437e09e3f7dc916d8bc (diff)
Merge branch 'xdp-redirect-bulk'
Lorenzo Bianconi says:

====================
XDP bulk APIs introduce a defer/flush mechanism to return pages belonging
to the same xdp_mem_allocator object (identified via the mem.id field) in
bulk, to optimize I-cache and D-cache, since xdp_return_frame is usually
run inside the driver NAPI tx completion loop.

Convert mvneta, mvpp2 and mlx5 drivers to xdp_return_frame_bulk APIs.

More details on benchmarks run on mlx5 can be found here:
https://github.com/xdp-project/xdp-project/blob/master/areas/mem/xdp_bulk_return01.org

Changes since v5:
- do not keep looping over ptr_ring if the cache is full, but release
  leftover pages running page_pool_return_page

Changes since v4:
- fix comments
- introduce xdp_frame_bulk_init utility routine
- compiler annotations for I-cache code layout
- move rcu_read_lock outside fast-path
- mlx5 xdp bulking code optimization

Changes since v3:
- align DEV_MAP_BULK_SIZE to XDP_BULK_QUEUE_SIZE
- refactor page_pool_put_page_bulk to avoid code duplication

Changes since v2:
- move mvneta changes in a dedicated patch

Changes since v1:
- improve comments
- rework xdp_return_frame_bulk routine logic
- move count and xa fields at the beginning of xdp_frame_bulk struct
- invert logic in page_pool_put_page_bulk for loop
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
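As a minimal usage sketch of the new bulk API from a driver's NAPI tx completion path (not part of this series; the real conversions are the mvneta, mvpp2 and mlx5 patches, and example_xdp_tx_complete() with its frame array is a hypothetical placeholder):

/* Return a batch of completed XDP tx frames through the bulk API.
 * Frames sharing the same mem.id are queued in an xdp_frame_bulk and
 * recycled to their page_pool in chunks of XDP_BULK_QUEUE_SIZE.
 */
#include <linux/rcupdate.h>
#include <net/xdp.h>

static void example_xdp_tx_complete(struct xdp_frame **frames, int n)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	/* rcu_read_lock() is taken once here, outside the per-frame
	 * fast path (see the v4 changelog above).
	 */
	rcu_read_lock();

	for (i = 0; i < n; i++)
		xdp_return_frame_bulk(frames[i], &bq);

	/* Flush whatever is still queued before leaving the loop. */
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();
}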
Diffstat (limited to 'include')
-rw-r--r--  include/net/page_pool.h  26
-rw-r--r--  include/net/xdp.h        17
2 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 81d7773f96cd..b5b195305346 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -152,6 +152,8 @@ struct page_pool *page_pool_create(const struct page_pool_params *params);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -165,6 +167,11 @@ static inline void page_pool_release_page(struct page_pool *pool,
struct page *page)
{
}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+					   int count)
+{
+}
#endif
void page_pool_put_page(struct page_pool *pool, struct page *page,
@@ -215,4 +222,23 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
+
+static inline void page_pool_ring_lock(struct page_pool *pool)
+	__acquires(&pool->ring.producer_lock)
+{
+	if (in_serving_softirq())
+		spin_lock(&pool->ring.producer_lock);
+	else
+		spin_lock_bh(&pool->ring.producer_lock);
+}
+
+static inline void page_pool_ring_unlock(struct page_pool *pool)
+	__releases(&pool->ring.producer_lock)
+{
+	if (in_serving_softirq())
+		spin_unlock(&pool->ring.producer_lock);
+	else
+		spin_unlock_bh(&pool->ring.producer_lock);
+}
+
#endif /* _NET_PAGE_POOL_H */
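As a rough sketch of why the producer-lock helpers exist (an assumption about intended use, not the net/core/page_pool.c code from this series; example_bulk_recycle() is hypothetical, and the series itself frees leftover pages with the internal page_pool_return_page() helper):

#include <linux/mm.h>
#include <linux/ptr_ring.h>
#include <net/page_pool.h>

static void example_bulk_recycle(struct page_pool *pool,
				 struct page **pages, int count)
{
	int i;

	/* Take the ptr_ring producer lock once per batch; the helper picks
	 * spin_lock() vs spin_lock_bh() depending on softirq context.
	 */
	page_pool_ring_lock(pool);
	for (i = 0; i < count; i++) {
		if (__ptr_ring_produce(&pool->ring, pages[i]))
			break;	/* ring cache full, stop producing */
	}
	page_pool_ring_unlock(pool);

	/* Do not keep looping on a full ring (see the v5 changelog):
	 * unmap and drop the pages that did not fit, outside the lock.
	 */
	for (; i < count; i++) {
		page_pool_release_page(pool, pages[i]);
		put_page(pages[i]);
	}
}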
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 3814fb631d52..7d48b2ae217a 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -104,6 +104,18 @@ struct xdp_frame {
	struct net_device *dev_rx; /* used by cpumap */
};
+#define XDP_BULK_QUEUE_SIZE 16
+struct xdp_frame_bulk {
+	int count;
+	void *xa;
+	void *q[XDP_BULK_QUEUE_SIZE];
+};
+
+static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
+{
+	/* bq->count will be zero'ed when bq->xa gets updated */
+	bq->xa = NULL;
+}
static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
@@ -194,6 +206,9 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+			   struct xdp_frame_bulk *bq);
/* When sending xdp_frame into the network stack, then there is no
* return point callback, which is needed to release e.g. DMA-mapping
@@ -245,6 +260,6 @@ bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
-#define DEV_MAP_BULK_SIZE 16
+#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
#endif /* __LINUX_NET_XDP_H__ */