// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */
#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"
#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
/* rhashtable parameters for the bridge FDB (forwarding database).
 * Entries (struct ice_esw_br_fdb_entry) are keyed by their embedded
 * struct ice_esw_br_fdb_data; the table shrinks automatically as
 * entries are removed.
 */
static const struct rhashtable_params ice_fdb_ht_params = {
	.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
	.key_len = sizeof(struct ice_esw_br_fdb_data),
	.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
/* Accept only PF netdev, PRs and LAG */
return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
netif_is_lag_master(dev);
}
static struct net_device *
ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
{
struct net_device *lower;
struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, lower, iter) {
if (netif_is_ice(lower))
return lower;
}
return NULL;
}
static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
if (ice_is_port_repr_netdev(dev)) {
struct ice_repr *repr = ice_netdev_to_repr(dev);
return repr->br_port;
} else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
struct net_device *ice_dev;
struct ice_pf *pf;
if (netif_is_lag_master(dev))
ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
else
ice_dev = dev;
if (!ice_dev)
return NULL;
pf = ice_netdev_to_pf(ice_dev);
return pf->br_port;
}
return NULL;
}
/**
 * ice_eswitch_br_ingress_rule_setup - fill switch action for ingress rule
 * @rule_info: advanced rule info to populate
 * @pf_id: source PF id to match on
 * @vf_vsi_idx: destination VF VSI handle
 *
 * Configure the rule to match RX traffic coming from PF @pf_id and
 * direct it to the VF VSI @vf_vsi_idx, at priority 2.
 */
static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
				  u8 pf_id, u16 vf_vsi_idx)
{
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = pf_id;
	rule_info->sw_act.vsi_handle = vf_vsi_idx;
	rule_info->priority = 2;
}
static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
u16 pf_vsi_idx)
{
rule_info->sw_act.vsi_handle = pf_vsi_idx;
rule_info->sw_act.flag |= ICE_