From 3e5014909b5661b3da59990d72a317a45ba3b284 Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Thu, 6 Jan 2022 16:20:25 -0800
Subject: mptcp: cleanup MPJ subflow list handling

We can simplify the join list handling by leveraging mptcp_release_cb():
if we can acquire the msk socket lock at mptcp_finish_join() time, move
the new subflow directly into the conn_list; otherwise place it on the
join_list and let the release_cb process that list.

Since pending MPJ connections are now always processed in a timely way,
we can avoid flushing the join list every time we have to process all
the current subflows.

Additionally, we can now use the mptcp data lock to protect the
join_list, removing the additional spin lock.

Finally, since the MPJ handshake is now always finalized under the msk
socket lock, we can drop the additional synchronization between
mptcp_finish_join() and mptcp_close().

Signed-off-by: Paolo Abeni
Signed-off-by: Mat Martineau
Signed-off-by: David S. Miller
---
 net/mptcp/subflow.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'net/mptcp/subflow.c')

diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index d861307f7efe..a1cd39f97659 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1441,7 +1441,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
 	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
 
-	mptcp_add_pending_subflow(msk, subflow);
+	sock_hold(ssk);
+	list_add_tail(&subflow->node, &msk->conn_list);
 	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
 	if (err && err != -EINPROGRESS)
 		goto failed_unlink;
@@ -1452,9 +1453,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 	return err;
 
 failed_unlink:
-	spin_lock_bh(&msk->join_list_lock);
 	list_del(&subflow->node);
-	spin_unlock_bh(&msk->join_list_lock);
 	sock_put(mptcp_subflow_tcp_sock(subflow));
 
 failed:
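
As background for the locking scheme described in the commit message, here is a
minimal userspace sketch of the try-lock-or-defer pattern it relies on. All names
below (struct owner, finish_join(), release_cb(), list_push()) are invented
stand-ins for illustration and are not the kernel's MPTCP code; the sketch only
models the idea that a joiner either grabs the owner lock and links itself
directly onto the connection list, or parks itself on a deferred list protected
by a lighter data lock for the lock owner to drain in its release callback.

/*
 * Minimal userspace model of the "try the socket lock, else defer to the
 * release callback" scheme described above.  All names are invented for
 * illustration; this is not the kernel's MPTCP implementation.
 */
#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
};

struct owner {
	pthread_mutex_t sock_lock;	/* models the msk socket lock */
	pthread_mutex_t data_lock;	/* models the mptcp data lock */
	struct item *conn_list;		/* fully joined subflows */
	struct item *join_list;		/* joins deferred to the release cb */
};

/* Push onto a singly linked list; a helper for this sketch only. */
static void list_push(struct item **head, struct item *it)
{
	it->next = *head;
	*head = it;
}

/*
 * Models mptcp_finish_join(): if the socket lock is free, link the new
 * subflow straight onto conn_list; otherwise park it on join_list under
 * the data lock and let the current lock owner pick it up later.
 */
static void finish_join(struct owner *o, struct item *it)
{
	if (pthread_mutex_trylock(&o->sock_lock) == 0) {
		list_push(&o->conn_list, it);
		pthread_mutex_unlock(&o->sock_lock);
		return;
	}

	pthread_mutex_lock(&o->data_lock);
	list_push(&o->join_list, it);
	pthread_mutex_unlock(&o->data_lock);
}

/*
 * Models mptcp_release_cb(): run by the socket lock owner before it
 * releases the lock, so deferred joins are processed in a timely way.
 */
static void release_cb(struct owner *o)
{
	pthread_mutex_lock(&o->data_lock);
	while (o->join_list) {
		struct item *it = o->join_list;

		o->join_list = it->next;
		list_push(&o->conn_list, it);
	}
	pthread_mutex_unlock(&o->data_lock);
}

int main(void)
{
	struct owner o = {
		.sock_lock = PTHREAD_MUTEX_INITIALIZER,
		.data_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct item a, b;
	int n = 0;

	finish_join(&o, &a);		/* lock free: goes straight to conn_list */

	pthread_mutex_lock(&o.sock_lock);
	finish_join(&o, &b);		/* lock busy: parked on join_list */
	release_cb(&o);			/* owner drains join_list before unlocking */
	pthread_mutex_unlock(&o.sock_lock);

	for (struct item *it = o.conn_list; it; it = it->next)
		n++;
	printf("items on conn_list: %d\n", n);	/* prints 2 */
	return 0;
}

In the kernel the deferral is not a second blocking lock acquisition but the
release_cb mechanism tied to the msk socket lock; the pthread trylock above is
only a stand-in for "is the msk socket lock currently owned".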