summaryrefslogtreecommitdiff
path: root/net/mptcp/subflow.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/mptcp/subflow.c')
-rw-r--r--net/mptcp/subflow.c22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index bf5e5c72b5ee..281c1cc8dc8d 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -723,9 +723,12 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
if (!ctx)
return;
- subflow_ulp_fallback(ssk, ctx);
- if (ctx->conn)
- sock_put(ctx->conn);
+ list_del(&mptcp_subflow_ctx(ssk)->node);
+ if (inet_csk(ssk)->icsk_ulp_ops) {
+ subflow_ulp_fallback(ssk, ctx);
+ if (ctx->conn)
+ sock_put(ctx->conn);
+ }
kfree_rcu(ctx, rcu);
}
@@ -1824,6 +1827,7 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
struct mptcp_sock *msk, *next, *head = NULL;
struct request_sock *req;
+ struct sock *sk;
/* build a list of all unaccepted mptcp sockets */
spin_lock_bh(&queue->rskq_lock);
@@ -1839,11 +1843,12 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
continue;
/* skip if already in list */
- msk = mptcp_sk(subflow->conn);
+ sk = subflow->conn;
+ msk = mptcp_sk(sk);
if (msk->dl_next || msk == head)
continue;
- sock_hold(subflow->conn);
+ sock_hold(sk);
msk->dl_next = head;
head = msk;
}
@@ -1857,16 +1862,13 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
release_sock(listener_ssk);
for (msk = head; msk; msk = next) {
- struct sock *sk = (struct sock *)msk;
+ sk = (struct sock *)msk;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
next = msk->dl_next;
msk->dl_next = NULL;
- /* prevent the stack from later re-schedule the worker for
- * this socket
- */
- inet_sk_state_store(sk, TCP_CLOSE);
+ __mptcp_unaccepted_force_close(sk);
release_sock(sk);
/* lockdep will report a false positive ABBA deadlock