author		Ayush Sawal <ayush.sawal@chelsio.com>	2021-01-12 11:06:00 +0530
committer	Jakub Kicinski <kuba@kernel.org>	2021-01-13 19:40:05 -0800
commit		8ad2a970d2010add3963e7219eb50367ab3fa4eb
tree		db4d41a94635a2319ff00da6a967beb12ec0ddc4
parent		7128c834d30e6b2cf649f14d8fc274941786d0e1
cxgb4/chtls: Fix tid stuck due to wrong update of qid
A TID gets stuck when there is a race between CPL_PASS_ACCEPT_RPL and
CPL_ABORT_REQ: the abort arrives before the accept reply, which is what
sets the queue number. In this case the HW ends up sending
CPL_ABORT_RPL_RSS to an incorrect ingress queue.

V1->V2:
- Removed the unused variable len in chtls_set_quiesce_ctrl().

V2->V3:
- Since kfree_skb() already checks for a NULL skb, removed that check
  before calling kfree_skb() in chtls_send_reset().

Fixes: cc35c88ae4db ("crypto : chtls - CPL handler definition")
Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Link: https://lore.kernel.org/r/20210112053600.24590-1-ayush.sawal@chelsio.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
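The hunks below add the two helpers only; their callers live in
chtls_cm.c, outside this file's diff. A minimal sketch of the pattern
the fix enables, assuming a hypothetical caller, cookie value, and
rss_qid plumbing — chtls_set_tcb_field_rpl_skb() is from this commit,
and TCB_RSS_INFO_W/_M/_V are cxgb4's t4_tcb.h accessors for the TCB
word holding the reply (ingress) queue id:

	/* Hypothetical caller: before completing an abort that raced
	 * ahead of the accept reply, rewrite the TCB word that steers
	 * replies to the correct ingress queue, and request a
	 * CPL_SET_TCB_RPL carrying `cookie` so the abort reply can be
	 * deferred until the update has landed in the TCB. */
	static void example_fix_abort_qid(struct sock *sk, u32 rss_qid,
					  u8 cookie)
	{
		chtls_set_tcb_field_rpl_skb(sk, TCB_RSS_INFO_W,
					    TCB_RSS_INFO_V(TCB_RSS_INFO_M),
					    TCB_RSS_INFO_V(rss_qid),
					    cookie, 0 /* not via L2T */);
	}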
Diffstat (limited to 'drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c')
-rw-r--r--	drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c	41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index a4fb463af22a..1e67140b0f80 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
 	return ret < 0 ? ret : 0;
 }
 
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+ u64 mask, u64 val, u8 cookie,
+ int through_l2t)
+{
+ struct sk_buff *skb;
+ unsigned int wrlen;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return;
+
+ __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+ send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
 /*
  * Set one of the t_flags bits in the TCB.
  */
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
 				   TF_RX_QUIESCE_V(val));
 }
 
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+ struct chtls_sock *csk;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+ wrlen = roundup(wrlen, 16);
+
+ skb = alloc_skb(wrlen, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+ ret = cxgb4_ofld_send(csk->egress_dev, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+}
+
 /* TLS Key bitmap processing */
 int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
 {
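A minimal usage sketch contrasting the two new helpers, assuming a
hypothetical caller name; only chtls_set_quiesce_ctrl() is from this
commit. chtls_set_tcb_field_rpl_skb() may sleep (GFP_KERNEL |
__GFP_NOFAIL allocation, routed through send_or_defer()) and requests
a reply, while chtls_set_quiesce_ctrl() is callable from atomic
context (GFP_ATOMIC, direct cxgb4_ofld_send()) and asks for no reply.
Note that as written it always clears TF_RX_QUIESCE (mask
TF_RX_QUIESCE_V(1), value 0), so its val argument has no effect on the
work request:

	/* Hypothetical caller: resume RX on the TID once the TCB
	 * update is done. Fire-and-forget: on a send failure the
	 * helper frees the skb and the work request is dropped. */
	static void example_resume_rx(struct sock *sk)
	{
		chtls_set_quiesce_ctrl(sk, 0);
	}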