author     David S. Miller <davem@davemloft.net>  2021-03-14 14:48:26 -0700
committer  David S. Miller <davem@davemloft.net>  2021-03-14 14:48:26 -0700
commit     c6baf7eeb0cf82f6a90a703f6548250fc85cfdcc (patch)
tree       c6bf285f9bd54f1a0b57d604226f34895f4034f2 /include/linux/skbuff.h
parent     3f79eb3c3a6abaa8f9900b5e40994060d7341cbc (diff)
parent     d206121faf8bb2239cd970af0bd32f5203780427 (diff)
Merge branch 'skbuff-micro-optimize-flow-dissection'
Alexander Lobakin says:
====================
skbuff: micro-optimize flow dissection
This series makes all of the flow dissection functions take the
raw input data pointer as const (patches 1-5) and reorders the
branches in __skb_header_pointer() according to their hit
probability.
The result is +20 Mbps per flow/core with one Flow Dissector pass
per packet. This benefits RPS (with software hashing), drivers that
use eth_get_headlen() on their Rx path, and so on.
From v2 [1]:
- reword some commit messages as a potential fix for NIPA;
- no functional changes.
From v1 [0]:
- rebase on top of the latest net-next. This was odd: I
  double-checked that the series applied locally with no conflicts,
  yet on Patchwork it didn't;
- no other changes.
[0] https://lore.kernel.org/netdev/20210312194538.337504-1-alobakin@pm.me
[1] https://lore.kernel.org/netdev/20210313113645.5949-1-alobakin@pm.me
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
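
To illustrate the branch-probability trick the series applies to
__skb_header_pointer(), here is a minimal, self-contained userspace
sketch (not kernel code): likely()/unlikely() expand to
__builtin_expect() as in the kernel's <linux/compiler.h>, the
header_pointer() helper mirrors the patched fast path, and the
skb_copy_bits() fallback is stubbed out to return NULL, since this
model has no paged data to copy from.

#include <stdio.h>

/* Branch hints as defined in the kernel's <linux/compiler.h>. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Userspace analogue of the patched __skb_header_pointer(): the common
 * case -- the requested header lies inside the linear area -- is marked
 * likely() and returns a pointer straight into the (const) data.  The
 * skb_copy_bits() fallback is stubbed: an out-of-range request fails.
 */
static void *header_pointer(const void *data, int hlen,
			    int offset, int len, void *buffer)
{
	if (likely(hlen - offset >= len))
		return (void *)data + offset;	/* GNU void * arithmetic, as in the patch */

	(void)buffer;	/* would be filled by the copy fallback */
	return NULL;
}

int main(void)
{
	unsigned char pkt[64] = { 0x45 };	/* fake IPv4 version/IHL byte */
	unsigned char buf[4];
	unsigned char *p;

	/* Fast path: bytes 0..3 sit within the 64-byte linear area. */
	p = header_pointer(pkt, sizeof(pkt), 0, 4, buf);
	printf("fast path: byte 0 = 0x%02x\n", p ? p[0] : 0);

	/* Miss: a request past the linear area takes the unlikely branch. */
	p = header_pointer(pkt, sizeof(pkt), 62, 4, buf);
	printf("slow path: %s\n", p ? "pointer" : "NULL");
	return 0;
}

Annotating the in-linear-data case as likely() lets the compiler lay
the fast path out as the fall-through, which is the kind of layout
change behind the reported +20 Mbps per flow/core.
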
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 483e89348f78..ecc029674ae4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1292,10 +1292,10 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 void __skb_get_hash(struct sk_buff *skb);
 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
 		   const struct flow_keys_basic *keys, int hlen);
 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
-			    void *data, int hlen_proto);
+			    const void *data, int hlen_proto);
 
 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
 					 int thoff, u8 ip_proto)
@@ -1314,9 +1314,8 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
 bool __skb_flow_dissect(const struct net *net,
 			const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
-			void *target_container,
-			void *data, __be16 proto, int nhoff, int hlen,
-			unsigned int flags);
+			void *target_container, const void *data,
+			__be16 proto, int nhoff, int hlen, unsigned int flags);
 
 static inline bool skb_flow_dissect(const struct sk_buff *skb,
 				    struct flow_dissector *flow_dissector,
@@ -1338,9 +1337,9 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
 static inline bool
 skb_flow_dissect_flow_keys_basic(const struct net *net,
 				 const struct sk_buff *skb,
-				 struct flow_keys_basic *flow, void *data,
-				 __be16 proto, int nhoff, int hlen,
-				 unsigned int flags)
+				 struct flow_keys_basic *flow,
+				 const void *data, __be16 proto,
+				 int nhoff, int hlen, unsigned int flags)
 {
 	memset(flow, 0, sizeof(*flow));
 	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
@@ -3678,14 +3677,13 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
 		    __wsum csum);
 
 static inline void * __must_check
-__skb_header_pointer(const struct sk_buff *skb, int offset,
-		     int len, void *data, int hlen, void *buffer)
+__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
+		     const void *data, int hlen, void *buffer)
 {
-	if (hlen - offset >= len)
-		return data + offset;
+	if (likely(hlen - offset >= len))
+		return (void *)data + offset;
 
-	if (!skb ||
-	    skb_copy_bits(skb, offset, buffer, len) < 0)
+	if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
 		return NULL;
 
 	return buffer;
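
For context on how callers see the change: flow-dissection helpers
typically pull a header with __skb_header_pointer() and only touch
the on-stack buffer when the slow path fires.  A hypothetical call
site (not part of this patch; names follow the flow-dissector
convention) might look like:

	/* Hypothetical dissector step: pull the UDP header at nhoff.
	 * `data` can now be const; `_udp` is written only on the
	 * copy (slow) path.
	 */
	const struct udphdr *udp;
	struct udphdr _udp;

	udp = __skb_header_pointer(skb, nhoff, sizeof(_udp), data, hlen, &_udp);
	if (!udp)
		return FLOW_DISSECT_RET_OUT_BAD;

The one subtlety in the patch is the (void *) cast on the fast-path
return: the helper still hands back a non-const pointer so existing
callers that write through it keep compiling, while const-correct
callers like the sketch above can stay const end to end.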