// SPDX-License-Identifier: GPL-2.0-only
/*
 * Vxlan vni filter for collect metadata mode
 *
 * Authors: Roopa Prabhu <roopa@nvidia.com>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/rhashtable.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/vxlan.h>
#include "vxlan_private.h"
static inline int vxlan_vni_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct vxlan_vni_node *vnode = ptr;
	__be32 vni = *(__be32 *)arg->key;

	return vnode->vni != vni;
}

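/* rhashtable of vni nodes, keyed by the __be32 vni field */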
const struct rhashtable_params vxlan_vni_rht_params = {
	.head_offset = offsetof(struct vxlan_vni_node, vnode),
	.key_offset = offsetof(struct vxlan_vni_node, vni),
	.key_len = sizeof(__be32),
	.nelem_hint = 3,
	.max_size = VXLAN_N_VID,
	.obj_cmpfn = vxlan_vni_cmp,
	.automatic_shrinking = true,
};

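/*
 * Hash or unhash a single vni node on the device's underlying vxlan
 * socket(s), under vn->sock_lock.
 */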
static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
				     struct vxlan_vni_node *v,
				     bool del)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_dev_node *node;
	struct vxlan_sock *vs;

	spin_lock(&vn->sock_lock);
	if (del) {
		if (!hlist_unhashed(&v->hlist4.hlist))
			hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
		if (!hlist_unhashed(&v->hlist6.hlist))
			hlist_del_init_rcu(&v->hlist6.hlist);
#endif
		goto out;
	}

#if IS_ENABLED(CONFIG_IPV6)
	vs = rtnl_dereference(vxlan->vn6_sock);
	if (vs && v) {
		node = &v->hlist6;
		hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
	}
#endif
	vs = rtnl_dereference(vxlan->vn4_sock);
	if (vs && v) {
		node = &v->hlist4;
		hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
	}
out:
	spin_unlock(&vn->sock_lock);
}

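/* Hash all vni nodes in the device's vni group onto the given vxlan socket */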
void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
			 struct vxlan_sock *vs,
			 bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
	struct vxlan_vni_node *v, *tmp;
	struct vxlan_dev_node *node;

	if (!vg)
		return;

	spin_lock(&vn->sock_lock);
	list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
#if IS_ENABLED(CONFIG_IPV6)
		if (ipv6)
			node = &v->hlist6;
		else
#endif
			node = &v->hlist4;
		node->vxlan = vxlan;
		hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
	}
	spin_unlock(&vn->sock_lock);
}

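/* Unhash all vni nodes in the device's vni group, for both address families */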
void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
{
	struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_vni_node *v, *tmp;

	if (!vg)
		return;

	spin_lock(&vn->sock_lock);
	list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
		hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
		hlist_del_init_rcu(&v->hlist6.hlist);
#endif
	}
	spin_unlock(&vn->sock_lock);
}

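/* Fold the per-cpu counters of @vninode into @dest */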
static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
				      struct vxlan_vni_stats *dest)
{
	int i;

	memset(dest, 0, sizeof(*dest));
	for_each_possible_cpu(i) {
		struct vxlan_vni_stats_pcpu *pstats;
		struct vxlan_vni_stats temp;
		unsigned int start;

		pstats = per_cpu_ptr(vninode->stats, i);
		do {
			start = u64_stats_fetch_begin(&pstats->syncp);
			memcpy(&temp, &pstats->stats, sizeof(temp));
		} while (u64_stats_fetch_retry(&pstats->syncp, start));

		dest->rx_packets += temp.rx_packets;
		dest->rx_bytes += temp.rx_bytes;
		dest->rx_drops += temp.rx_drops;
		dest->rx_errors += temp.rx_errors;
		dest->tx_packets += temp.tx_packets;
		dest->tx_bytes += temp.tx_bytes;
		dest->tx_drops += temp.tx_drops;
		dest->tx_errors += temp.tx_errors;
	}
}

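/* Account one event of @type on the current cpu; @len feeds the byte counters */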
static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
				      int type, unsigned int len)
{
	struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (type) {
	case VXLAN_VNI_STATS_RX:
		pstats->stats.rx_bytes += len;
		pstats->stats.rx_packets++;
		break;
	case VXLAN_VNI_STATS_RX_DROPS:
		pstats->stats.rx_drops++;
		break;
	case VXLAN_VNI_STATS_RX_ERRORS:
		pstats->stats.rx_errors++;
		break;