author     Jakub Kicinski <kuba@kernel.org>    2022-12-09 20:06:34 -0800
committer  Jakub Kicinski <kuba@kernel.org>    2022-12-09 20:06:35 -0800
commit     dd8b3a802b64adf059a49a68f1bdca7846e492fc (patch)
tree       2cb39d19e1b9763967e01b83c985f135057de21e
parent     5fc11a401a8dc491b326d2c916b07d22e7ac8833 (diff)
parent     abe2343d37c2b4361547d5d31e17340ff9ec7356 (diff)
Merge tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
Steffen Klassert says:
====================
ipsec-next 2022-12-09
1) Add xfrm packet offload core API.
From Leon Romanovsky.
2) Add xfrm packet offload support for mlx5.
From Leon Romanovsky and Raed Salem.
3) Fix a typo in an error message.
From Colin Ian King.
* tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next: (38 commits)
xfrm: Fix spelling mistake "oflload" -> "offload"
net/mlx5e: Open mlx5 driver to accept IPsec packet offload
net/mlx5e: Handle ESN update events
net/mlx5e: Handle hardware IPsec limits events
net/mlx5e: Update IPsec soft and hard limits
net/mlx5e: Store all XFRM SAs in Xarray
net/mlx5e: Provide intermediate pointer to access IPsec struct
net/mlx5e: Skip IPsec encryption for TX path without matching policy
net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows
net/mlx5e: Improve IPsec flow steering autogroup
net/mlx5e: Configure IPsec packet offload flow steering
net/mlx5e: Use same coding pattern for Rx and Tx flows
net/mlx5e: Add XFRM policy offload logic
net/mlx5e: Create IPsec policy offload tables
net/mlx5e: Generalize creation of default IPsec miss group and rule
net/mlx5e: Group IPsec miss handles into separate struct
net/mlx5e: Make clear what IPsec rx_err does
net/mlx5e: Flatten the IPsec RX add rule path
net/mlx5e: Refactor FTE setup code to be more clear
net/mlx5e: Move IPsec flow table creation to separate function
...
====================
Link: https://lore.kernel.org/r/20221209093310.4018731-1-steffen.klassert@secunet.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
30 files changed, 2141 insertions, 512 deletions
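The series below distinguishes the two offload modes via the type recorded
on each object: states carry it in x->xso.type and, new with packet offload,
policies carry it in x->xdo.type. A minimal sketch of the resulting
driver-side check (the foo_* helpers are hypothetical illustrations, not
code from this series):

    #include <net/xfrm.h>

    /* Illustrative only: states record their offload mode in x->xso.type;
     * policies record it in x->xdo.type.
     */
    static bool foo_state_is_packet_offload(struct xfrm_state *x)
    {
    	return x->xso.type == XFRM_DEV_OFFLOAD_PACKET;
    }

    static bool foo_policy_is_packet_offload(struct xfrm_policy *p)
    {
    	return p->xdo.type == XFRM_DEV_OFFLOAD_PACKET;
    }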
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index 01391dfd37d9..c43ace79e320 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -5,6 +5,7 @@ XFRM device - offloading the IPsec computations
 ===============================================
 
 Shannon Nelson <shannon.nelson@oracle.com>
+Leon Romanovsky <leonro@nvidia.com>
 
 
 Overview
@@ -18,10 +19,21 @@ can radically increase throughput and decrease CPU utilization.  The XFRM
 Device interface allows NIC drivers to offer to the stack access to the
 hardware offload.
 
+Right now, there are two types of hardware offload that the kernel supports:
+ * IPsec crypto offload:
+   * NIC performs encrypt/decrypt
+   * Kernel does everything else
+ * IPsec packet offload:
+   * NIC performs encrypt/decrypt
+   * NIC does encapsulation
+   * Kernel and NIC keep the SA and policy in sync
+   * NIC handles the SA and policy states
+   * Kernel talks to the key manager
+
 Userland access to the offload is typically through a system such as
 libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
 be handy when experimenting.  An example command might look something
-like this::
+like this for crypto offload::
 
     ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
        reqid 0x07 replay-window 32 \
@@ -29,6 +41,17 @@ like this::
        sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
        offload dev eth4 dir in
 
+and for packet offload::
+
+    ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
+       reqid 0x07 replay-window 32 \
+       aead 'rfc4106(gcm(aes))' 0x44434241343332312423222114131211f4f3f2f1 128 \
+       sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
+       offload packet dev eth4 dir in
+
+    ip x p add src 14.0.0.70 dst 14.0.0.52 offload packet dev eth4 dir in
+       tmpl src 14.0.0.70 dst 14.0.0.52 proto esp reqid 10000 mode transport
+
 Yes, that's ugly, but that's what shell scripts and/or libreswan
 are for.
 
@@ -40,17 +63,24 @@ Callbacks to implement
 
   /* from include/linux/netdevice.h */
   struct xfrmdev_ops {
+        /* Crypto and Packet offload callbacks */
         int     (*xdo_dev_state_add) (struct xfrm_state *x);
         void    (*xdo_dev_state_delete) (struct xfrm_state *x);
         void    (*xdo_dev_state_free) (struct xfrm_state *x);
         bool    (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                        struct xfrm_state *x);
         void    (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+
+        /* Solely packet offload callbacks */
+        void    (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+        int     (*xdo_dev_policy_add) (struct xfrm_policy *x);
+        void    (*xdo_dev_policy_delete) (struct xfrm_policy *x);
+        void    (*xdo_dev_policy_free) (struct xfrm_policy *x);
   };
 
-The NIC driver offering ipsec offload will need to implement these
-callbacks to make the offload available to the network stack's
-XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
+The NIC driver offering ipsec offload will need to implement the callbacks
+relevant to the supported offload to make the offload available to the
+network stack's XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
 
@@ -79,7 +109,8 @@ and an indication of whether it is for Rx or Tx.  The driver should
 
 	===========   ===================================
 	0             success
-	-EOPNOTSUPP   offload not supported, try SW IPsec
+	-EOPNOTSUPP   offload not supported, try SW IPsec,
+	              not applicable for packet offload mode
 	other         fail the request
 	===========   ===================================
 
@@ -96,6 +127,7 @@ will be serviceable.  This can check the packet information to be sure the
 offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and
 return true or false to signify its support.
 
+Crypto offload mode:
 When ready to send, the driver needs to inspect the Tx packet for the
 offload information, including the opaque context, and set up the packet
 send accordingly::
 
@@ -139,13 +171,25 @@ the stack in xfrm_input().
 
 In ESN mode, xdo_dev_state_advance_esn() is called from xfrm_replay_advance_esn().
 Driver will check packet seq number and update HW ESN state machine if needed.
 
+Packet offload mode:
+HW adds and deletes XFRM headers, so in the RX path the XFRM stack is bypassed
+if the HW reported success.  In the TX path, the packet leaves the kernel
+without the extra header and unencrypted; the HW is responsible for adding
+the header and encrypting the packet.
+
 When the SA is removed by the user, the driver's xdo_dev_state_delete()
-is asked to disable the offload.  Later, xdo_dev_state_free() is called
-from a garbage collection routine after all reference counts to the state
+and xdo_dev_policy_delete() are asked to disable the offload.  Later,
+xdo_dev_state_free() and xdo_dev_policy_free() are called from a garbage
+collection routine after all reference counts to the state and policy
 have been removed and any remaining resources can be cleared for the
 offload state.  How these are used by the driver will depend on specific
 hardware needs.
 
 As a netdev is set to DOWN the XFRM stack's netdev listener will call
-xdo_dev_state_delete() and xdo_dev_state_free() on any remaining offloaded
-states.
+xdo_dev_state_delete(), xdo_dev_policy_delete(), xdo_dev_state_free() and
+xdo_dev_policy_free() on any remaining offloaded states.
+
+Since the HW handles the packets, the XFRM core can't count the hard and soft
+limits.  The HW/driver is responsible for tracking them and for providing
+accurate data when xdo_dev_state_update_curlft() is called.  If one of these
+limits is reached, the driver needs to call xfrm_state_check_expire() to make
+sure that XFRM performs the rekeying sequence.
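To make the callback split documented above concrete, here is a minimal
sketch of how a packet-offload-capable driver might wire up the new hooks.
All foo_* names are hypothetical, and the empty bodies only mark where the
hardware programming would go:

    #include <linux/netdevice.h>
    #include <net/xfrm.h>

    static int foo_xdo_dev_state_add(struct xfrm_state *x)
    {
    	/* Program the SA into HW for either offload type. */
    	return 0;
    }

    static void foo_xdo_dev_state_delete(struct xfrm_state *x) { }
    static void foo_xdo_dev_state_free(struct xfrm_state *x) { }

    static int foo_xdo_dev_policy_add(struct xfrm_policy *x)
    {
    	/* Policies exist in HW only for packet offload. */
    	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
    		return -EINVAL;
    	/* ... install the policy into device steering tables ... */
    	return 0;
    }

    static void foo_xdo_dev_policy_delete(struct xfrm_policy *x) { }
    static void foo_xdo_dev_policy_free(struct xfrm_policy *x) { }

    static void foo_xdo_dev_state_update_curlft(struct xfrm_state *x)
    {
    	/* Refresh x->curlft.packets from HW counters; the core can't
    	 * count for us because the packets bypass the XFRM stack.
    	 */
    }

    static const struct xfrmdev_ops foo_packet_xfrmdev_ops = {
    	.xdo_dev_state_add	= foo_xdo_dev_state_add,
    	.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
    	.xdo_dev_state_free	= foo_xdo_dev_state_free,
    	.xdo_dev_policy_add	= foo_xdo_dev_policy_add,
    	.xdo_dev_policy_delete	= foo_xdo_dev_policy_delete,
    	.xdo_dev_policy_free	= foo_xdo_dev_policy_free,
    	.xdo_dev_state_update_curlft = foo_xdo_dev_state_update_curlft,
    };

A crypto-only driver registers just the first group of callbacks, which is
exactly what the in-tree drivers below continue to do.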
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 585590520076..ca21794281d6 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -283,6 +283,10 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
 		pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
 		return -EINVAL;
 	}
+	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+		pr_debug("Unsupported xfrm offload\n");
+		return -EINVAL;
+	}
 
 	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
 	if (!sa_entry) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 774de63dd93a..53a969e34883 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -585,6 +585,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 		return -EINVAL;
 	}
 
+	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+		netdev_err(dev, "Unsupported ipsec offload type\n");
+		return -EINVAL;
+	}
+
 	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
 		struct rx_sa rsa;
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 9984ebc62d78..c1cf540d162a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -280,6 +280,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 		return -EINVAL;
 	}
 
+	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+		netdev_err(dev, "Unsupported ipsec offload type\n");
+		return -EINVAL;
+	}
+
 	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
 		struct rx_sa rsa;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 65790ff58a74..2d77fb8a8a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1245,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
 int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
 int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
 #endif
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
 #endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bf2741eb7f9b..379c6dc9a3be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,7 +84,8 @@ enum {
 	MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
 #endif
 #ifdef CONFIG_MLX5_EN_IPSEC
-	MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+	MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+	MLX5E_ACCEL_FS_ESP_FT_LEVEL,
 	MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
 #endif
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 9c1c24da9453..78af8a3175bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
 			  MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
 
 	aso_ctrl = &aso_wqe->aso_ctrl;
-	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
 	aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
 	aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
 					  MLX5_ASO_ALWAYS_TRUE << 4;
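The chelsio, ixgbe and ixgbevf hunks above all add the same guard: a
crypto-only driver must now explicitly reject any other offload type. A
driver supporting both modes would dispatch instead; a rough sketch, with
all foo_* helpers hypothetical:

    #include <net/xfrm.h>

    /* Sketch: dispatching on the offload type in a dual-mode driver. */
    static int foo_ipsec_add_sa(struct xfrm_state *xs)
    {
    	switch (xs->xso.type) {
    	case XFRM_DEV_OFFLOAD_CRYPTO:
    		return foo_add_crypto_sa(xs);	/* hypothetical helper */
    	case XFRM_DEV_OFFLOAD_PACKET:
    		if (!foo_dev_has_packet_offload())	/* hypothetical cap check */
    			return -EINVAL;
    		return foo_add_packet_sa(xs);	/* hypothetical helper */
    	default:
    		return -EINVAL;
    	}
    }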
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 1b03ab03fc5a..bb9023957f74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -45,55 +45,9 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
 	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
 }
 
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
-					      unsigned int handle)
+static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
 {
-	struct mlx5e_ipsec_sa_entry *sa_entry;
-	struct xfrm_state *ret = NULL;
-
-	rcu_read_lock();
-	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
-		if (sa_entry->handle == handle) {
-			ret = sa_entry->x;
-			xfrm_state_hold(ret);
-			break;
-		}
-	rcu_read_unlock();
-
-	return ret;
-}
-
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-	unsigned int handle = sa_entry->ipsec_obj_id;
-	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-	struct mlx5e_ipsec_sa_entry *_sa_entry;
-	unsigned long flags;
-
-	rcu_read_lock();
-	hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
-		if (_sa_entry->handle == handle) {
-			rcu_read_unlock();
-			return -EEXIST;
-		}
-	rcu_read_unlock();
-
-	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-	sa_entry->handle = handle;
-	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
-	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
-
-	return 0;
-}
-
-static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-	hash_del_rcu(&sa_entry->hlist);
-	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
 }
 
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -129,9 +83,33 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 	return false;
 }
 
-static void
-mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
-				   struct mlx5_accel_esp_xfrm_attrs *attrs)
+static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
+				    struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+	struct xfrm_state *x = sa_entry->x;
+
+	attrs->hard_packet_limit = x->lft.hard_packet_limit;
+	if (x->lft.soft_packet_limit == XFRM_INF)
+		return;
+
+	/* Hardware decrements the hard_packet_limit counter during
+	 * operation, while it fires an event when soft_packet_limit
+	 * is reached. This means that we need to substitute the numbers
+	 * in order to properly count the soft limit.
+	 *
+	 * As an example:
+	 * The XFRM user sets a soft limit of 2 and a hard limit of 9 and
+	 * expects to see the soft event after 2 packets and the hard event
+	 * after 9 packets. In our case, the hard limit will be set
+	 * to 9 and the soft limit comparator to 7, so the user gets the
+	 * soft event after 2 packets.
+	 */
+	attrs->soft_packet_limit =
+		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
+}
+
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+					struct mlx5_accel_esp_xfrm_attrs *attrs)
 {
 	struct xfrm_state *x = sa_entry->x;
 	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
@@ -157,33 +135,31 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
 	       sizeof(aes_gcm->salt));
 
+	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */
+
 	/* iv len */
 	aes_gcm->icv_len = x->aead->alg_icv_len;
 
 	/* esn */
 	if (sa_entry->esn_state.trigger) {
-		attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+		attrs->esn_trigger = true;
 		attrs->esn = sa_entry->esn_state.esn;
-		if (sa_entry->esn_state.overlap)
-			attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
+		attrs->esn_overlap = sa_entry->esn_state.overlap;
+		attrs->replay_window = x->replay_esn->replay_window;
 	}
 
-	/* action */
-	attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
-			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
-			MLX5_ACCEL_ESP_ACTION_DECRYPT;
-	/* flags */
-	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
-			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
-			MLX5_ACCEL_ESP_FLAGS_TUNNEL;
-
+	attrs->dir = x->xso.dir;
 	/* spi */
 	attrs->spi = be32_to_cpu(x->id.spi);
 
 	/* source , destination ips */
 	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
 	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
-	attrs->is_ipv6 = (x->props.family != AF_INET);
+	attrs->family = x->props.family;
+	attrs->type = x->xso.type;
+	attrs->reqid = x->props.reqid;
+
+	mlx5e_ipsec_init_limits(sa_entry, attrs);
 }
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -215,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 		netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
 		return -EINVAL;
 	}
-	if (x->props.mode != XFRM_MODE_TRANSPORT &&
-	    x->props.mode != XFRM_MODE_TUNNEL) {
-		dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
-		return -EINVAL;
-	}
 	if (x->id.proto != IPPROTO_ESP) {
 		netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
 		return -EINVAL;
@@ -253,6 +224,67 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 		netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
 		return -EINVAL;
 	}
+	switch (x->xso.type) {
+	case XFRM_DEV_OFFLOAD_CRYPTO:
+		if (!(mlx5_ipsec_device_caps(priv->mdev) &
+		      MLX5_IPSEC_CAP_CRYPTO)) {
+			netdev_info(netdev, "Crypto offload is not supported\n");
+			return -EINVAL;
+		}
+
+		if (x->props.mode != XFRM_MODE_TRANSPORT &&
+		    x->props.mode != XFRM_MODE_TUNNEL) {
+			netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+			return -EINVAL;
+		}
+		break;
+	case XFRM_DEV_OFFLOAD_PACKET:
+		if (!(mlx5_ipsec_device_caps(priv->mdev) &
+		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+			netdev_info(netdev, "Packet offload is not supported\n");
+			return -EINVAL;
+		}
+
+		if (x->props.mode != XFRM_MODE_TRANSPORT) {
+			netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
+			return -EINVAL;
+		}
+
+		if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+		    x->replay_esn->replay_window != 64 &&
+		    x->replay_esn->replay_window != 128 &&
+		    x->replay_esn->replay_window != 256) {
+			netdev_info(netdev,
+				    "Unsupported replay window size %u\n",
+				    x->replay_esn->replay_window);
+			return -EINVAL;
+		}
+
+		if (!x->props.reqid) {
+			netdev_info(netdev, "Cannot offload without reqid\n");
+			return -EINVAL;
+		}
+
+		if (x->lft.hard_byte_limit != XFRM_INF ||
+		    x->lft.soft_byte_limit != XFRM_INF) {
+			netdev_info(netdev,
+				    "Device doesn't support limits in bytes\n");
+			return -EINVAL;
+		}
+
+		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
+		    x->lft.hard_packet_limit != XFRM_INF) {
+			/* XFRM stack doesn't prevent such configuration :(. */
+			netdev_info(netdev,
+				    "Hard packet limit must be greater than soft one\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		netdev_info(netdev, "Unsupported xfrm offload type %d\n",
+			    x->xso.type);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -270,6 +302,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
 	struct net_device *netdev = x->xso.real_dev;
+	struct mlx5e_ipsec *ipsec;
 	struct mlx5e_priv *priv;
 	int err;
 
@@ -277,6 +310,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	if (!priv->ipsec)
 		return -EOPNOTSUPP;
 
+	ipsec = priv->ipsec;
 	err = mlx5e_xfrm_validate_state(x);
 	if (err)
 		return err;
@@ -288,7 +322,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	}
 
 	sa_entry->x = x;
-	sa_entry->ipsec = priv->ipsec;
+	sa_entry->ipsec = ipsec;
 
 	/* check esn */
 	mlx5e_ipsec_update_esn_state(sa_entry);
@@ -299,25 +333,29 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	if (err)
 		goto err_xfrm;
 
-	err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
+	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
 	if (err)
 		goto err_hw_ctx;
 
-	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
-		err = mlx5e_ipsec_sadb_rx_add(sa_entry);
-		if (err)
-			goto err_add_rule;
-	} else {
+	/* We use *_bh() variant because xfrm_timer_handler(), which runs
+	 * in softirq context, can reach our state delete logic and we need
+	 * xa_erase_bh() there.
+	 */
+	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
+			   GFP_KERNEL);
+	if (err)
+		goto err_add_rule;
+
+	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
 		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
 				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
-	}
 
 	INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
 	x->xso.offload_handle = (unsigned long)sa_entry;
-	goto out;
+	return 0;
 
 err_add_rule:
-	mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
 err_hw_ctx:
 	mlx5_ipsec_free_sa_ctx(sa_entry);
 err_xfrm:
@@ -329,18 +367,19 @@ out:
 static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_sa_entry *old;
 
-	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
-		mlx5e_ipsec_sadb_rx_del(sa_entry);
+	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
+	WARN_ON(old != sa_entry);
 }
 
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
-	struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
 
 	cancel_work_sync(&sa_entry->modify_work.work);
-	mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
 	mlx5_ipsec_free_sa_ctx(sa_entry);
 	kfree(sa_entry);
 }
@@ -359,23 +398,33 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 	if (!ipsec)
 		return;
 
-	hash_init(ipsec->sadb_rx);
-	spin_lock_init(&ipsec->sadb_rx_lock);
+	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
 	ipsec->mdev = priv->mdev;
 	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
 					    priv->netdev->name);
 	if (!ipsec->wq)
 		goto err_wq;
 
+	if (mlx5_ipsec_device_caps(priv->mdev) &
+	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
+		ret = mlx5e_ipsec_aso_init(ipsec);
+		if (ret)
+			goto err_aso;
+	}
+
 	ret = mlx5e_accel_ipsec_fs_init(ipsec);
 	if (ret)
 		goto err_fs_init;
 
+	ipsec->fs = priv->fs;
 	priv->ipsec = ipsec;
 	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
 	return;
 
 err_fs_init:
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		mlx5e_ipsec_aso_cleanup(ipsec);
+err_aso:
 	destroy_workqueue(ipsec->wq);
 err_wq:
 	kfree(ipsec);
@@ -391,6 +440,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
 		return;
 
 	mlx5e_accel_ipsec_fs_cleanup(ipsec);
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		mlx5e_ipsec_aso_cleanup(ipsec);
 	destroy_workqueue(ipsec->wq);
 	kfree(ipsec);
 	priv->ipsec = NULL;
@@ -426,6 +477,122 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
 	queue_work(sa_entry->ipsec->wq, &modify_work->work);
 }
 
+static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+{
+	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	int err;
+
+	lockdep_assert_held(&x->lock);
+
+	if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
+		/* Limits are not configured, as the soft limit
+		 * must be lower than the hard limit.
+		 */
+		return;
+
+	err = mlx5e_ipsec_aso_query(sa_entry, NULL);
+	if (err)
+		return;
+
+	mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
+}
+
+static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
+{
+	struct net_device *netdev = x->xdo.real_dev;
+
+	if (x->type != XFRM_POLICY_TYPE_MAIN) {
+		netdev_info(netdev, "Cannot offload non-main policy types\n");
+		return -EINVAL;
+	}
+
+	/* Please pay attention that we support only one template */
+	if (x->xfrm_nr > 1) {
+		netdev_info(netdev, "Cannot offload more than one template\n");
+		return -EINVAL;
+	}
+
+	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
+	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
+		netdev_info(netdev, "Cannot offload forward policy\n");
+		return -EINVAL;
+	}
+
+	if (!x->xfrm_vec[0].reqid) {
+		netdev_info(netdev, "Cannot offload policy without reqid\n");
+		return -EINVAL;
+	}
+
+	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
+		netdev_info(netdev, "Unsupported xfrm offload type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
+				  struct mlx5_accel_pol_xfrm_attrs *attrs)
+{
+	struct xfrm_policy *x = pol_entry->x;
+	struct xfrm_selector *sel;
+
+	sel = &x->selector;
+	memset(attrs, 0, sizeof(*attrs));
+
+	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
+	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
+	attrs->family = sel->family;
+	attrs->dir = x->xdo.dir;
+	attrs->action = x->action;
+	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
+	attrs->reqid = x->xfrm_vec[0].reqid;
+}
+
+static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
+{
+	struct net_device *netdev = x->xdo.real_dev;
+	struct mlx5e_ipsec_pol_entry *pol_entry;
+	struct mlx5e_priv *priv;
+	int err;
+
+	priv = netdev_priv(netdev);
+	if (!priv->ipsec)
+		return -EOPNOTSUPP;
+
+	err = mlx5e_xfrm_validate_policy(x);
+	if (err)
+		return err;
+
+	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
+	if (!pol_entry)
+		return -ENOMEM;
+
+	pol_entry->x = x;
+	pol_entry->ipsec = priv->ipsec;
+
+	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
+	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
+	if (err)
+		goto err_fs;
+
+	x->xdo.offload_handle = (unsigned long)pol_entry;
+	return 0;
+
+err_fs:
+	kfree(pol_entry);
+	return err;
+}
+
+static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
+{
+	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
+
+	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+	kfree(pol_entry);
+}
+
 static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
 	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
 	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
@@ -434,6 +601,18 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 };
 
+static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
+	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
+	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
+	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
+	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
+	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+
+	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
+	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
+};
+
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -443,7 +622,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 		return;
 
 	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
-	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
+	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
+	else
+		netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
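The lifetime substitution in mlx5e_ipsec_init_limits() above can be
sanity-checked in isolation. A small standalone C program, using the example
numbers from the comment (soft limit 2, hard limit 9):

    /* HW decrements a counter starting at the hard limit and fires the
     * soft event when the counter hits the programmed comparator.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t hard = 9, soft = 2;		/* user's XFRM limits */
    	uint64_t comparator = hard - soft;	/* 7, as in the patch */

    	/* Counter: 9 -> 8 -> 7; it reaches the comparator after 2
    	 * packets, so the user sees the soft event exactly when asked.
    	 */
    	printf("comparator=%llu, soft event after %llu packets\n",
    	       (unsigned long long)comparator,
    	       (unsigned long long)(hard - comparator));
    	return 0;
    }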