Skip to content

Commit 4026310

Browse files
committed
Merge branch 'mptcp-misc-fixes-for-v6-18-rc7'
Matthieu Baerts says: ==================== mptcp: misc fixes for v6.18-rc7 Here are various unrelated fixes: - Patch 1: Fix window space computation for fallback connections which can affect ACK generation. A fix for v5.11. - Patch 2: Avoid unneeded subflow-level drops due to unsynced received window. A fix for v5.11. - Patch 3: Avoid premature close for fallback connections with PREEMPT kernels. A fix for v5.12. - Patch 4: Reset instead of fallback in case of data in the MPTCP out-of-order queue. A fix for v5.7. - Patches 5-7: Avoid also sending "plain" TCP reset when closing with an MP_FASTCLOSE. A fix for v6.1. - Patches 8-9: Longer timeout for background connections in MPTCP Join selftests. An additional fix for recent patches for v5.13/v6.1. - Patches 10-11: Fix typo in a check introduced in a recent refactoring. A fix for v6.15. ==================== Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-0-806d3781c95f@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents e31a11b + 0eee0fd commit 4026310

5 files changed

Lines changed: 113 additions & 32 deletions

File tree

net/mptcp/options.c

Lines changed: 53 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -838,8 +838,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
838838

839839
opts->suboptions = 0;
840840

841+
/* Force later mptcp_write_options(), but do not use any actual
842+
* option space.
843+
*/
841844
if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
842-
return false;
845+
return true;
843846

844847
if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
845848
if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
@@ -1041,6 +1044,31 @@ static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
10411044
WRITE_ONCE(msk->snd_una, new_snd_una);
10421045
}
10431046

1047+
static void rwin_update(struct mptcp_sock *msk, struct sock *ssk,
1048+
struct sk_buff *skb)
1049+
{
1050+
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1051+
struct tcp_sock *tp = tcp_sk(ssk);
1052+
u64 mptcp_rcv_wnd;
1053+
1054+
/* Avoid touching extra cachelines if TCP is going to accept this
1055+
* skb without filling the TCP-level window even with a possibly
1056+
* outdated mptcp-level rwin.
1057+
*/
1058+
if (!skb->len || skb->len < tcp_receive_window(tp))
1059+
return;
1060+
1061+
mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent);
1062+
if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent))
1063+
return;
1064+
1065+
/* Some other subflow grew the mptcp-level rwin since rcv_wup,
1066+
* resync.
1067+
*/
1068+
tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent;
1069+
subflow->rcv_wnd_sent = mptcp_rcv_wnd;
1070+
}
1071+
10441072
static void ack_update_msk(struct mptcp_sock *msk,
10451073
struct sock *ssk,
10461074
struct mptcp_options_received *mp_opt)
@@ -1208,6 +1236,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
12081236
*/
12091237
if (mp_opt.use_ack)
12101238
ack_update_msk(msk, sk, &mp_opt);
1239+
rwin_update(msk, sk, skb);
12111240

12121241
/* Zero-data-length packets are dropped by the caller and not
12131242
* propagated to the MPTCP layer, so the skb extension does not
@@ -1294,6 +1323,10 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
12941323

12951324
if (rcv_wnd_new != rcv_wnd_old) {
12961325
raise_win:
1326+
/* The msk-level rcv wnd is after the tcp level one,
1327+
* sync the latter.
1328+
*/
1329+
rcv_wnd_new = rcv_wnd_old;
12971330
win = rcv_wnd_old - ack_seq;
12981331
tp->rcv_wnd = min_t(u64, win, U32_MAX);
12991332
new_win = tp->rcv_wnd;
@@ -1317,6 +1350,21 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
13171350

13181351
update_wspace:
13191352
WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
1353+
subflow->rcv_wnd_sent = rcv_wnd_new;
1354+
}
1355+
1356+
static void mptcp_track_rwin(struct tcp_sock *tp)
1357+
{
1358+
const struct sock *ssk = (const struct sock *)tp;
1359+
struct mptcp_subflow_context *subflow;
1360+
struct mptcp_sock *msk;
1361+
1362+
if (!ssk)
1363+
return;
1364+
1365+
subflow = mptcp_subflow_ctx(ssk);
1366+
msk = mptcp_sk(subflow->conn);
1367+
WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
13201368
}
13211369

13221370
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
@@ -1611,6 +1659,10 @@ void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
16111659
opts->reset_transient,
16121660
opts->reset_reason);
16131661
return;
1662+
} else if (unlikely(!opts->suboptions)) {
1663+
/* Fallback to TCP */
1664+
mptcp_track_rwin(tp);
1665+
return;
16141666
}
16151667

16161668
if (OPTION_MPTCP_PRIO & opts->suboptions) {

net/mptcp/pm_kernel.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -672,7 +672,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
672672

673673
void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
674674
{
675-
if (rm_id && WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
675+
if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
676676
u8 limit_add_addr_accepted =
677677
mptcp_pm_get_limit_add_addr_accepted(msk);
678678

net/mptcp/protocol.c

Lines changed: 41 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,13 @@ bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
7676
if (__mptcp_check_fallback(msk))
7777
return true;
7878

79+
/* The caller possibly is not holding the msk socket lock, but
80+
* in the fallback case only the current subflow is touching
81+
* the OoO queue.
82+
*/
83+
if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
84+
return false;
85+
7986
spin_lock_bh(&msk->fallback_lock);
8087
if (!msk->allow_infinite_fallback) {
8188
spin_unlock_bh(&msk->fallback_lock);
@@ -2402,7 +2409,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
24022409

24032410
/* flags for __mptcp_close_ssk() */
24042411
#define MPTCP_CF_PUSH BIT(1)
2405-
#define MPTCP_CF_FASTCLOSE BIT(2)
24062412

24072413
/* be sure to send a reset only if the caller asked for it, also
24082414
* clean completely the subflow status when the subflow reaches
@@ -2413,7 +2419,7 @@ static void __mptcp_subflow_disconnect(struct sock *ssk,
24132419
unsigned int flags)
24142420
{
24152421
if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
2416-
(flags & MPTCP_CF_FASTCLOSE)) {
2422+
subflow->send_fastclose) {
24172423
/* The MPTCP code never wait on the subflow sockets, TCP-level
24182424
* disconnect should never fail
24192425
*/
@@ -2460,14 +2466,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
24602466

24612467
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
24622468

2463-
if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
2464-
/* be sure to force the tcp_close path
2465-
* to generate the egress reset
2466-
*/
2467-
ssk->sk_lingertime = 0;
2468-
sock_set_flag(ssk, SOCK_LINGER);
2469-
subflow->send_fastclose = 1;
2470-
}
2469+
if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
2470+
tcp_set_state(ssk, TCP_CLOSE);
24712471

24722472
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
24732473
if (!dispose_it) {
@@ -2563,7 +2563,8 @@ static void __mptcp_close_subflow(struct sock *sk)
25632563

25642564
if (ssk_state != TCP_CLOSE &&
25652565
(ssk_state != TCP_CLOSE_WAIT ||
2566-
inet_sk_state_load(sk) != TCP_ESTABLISHED))
2566+
inet_sk_state_load(sk) != TCP_ESTABLISHED ||
2567+
__mptcp_check_fallback(msk)))
25672568
continue;
25682569

25692570
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2771,9 +2772,26 @@ static void mptcp_do_fastclose(struct sock *sk)
27712772
struct mptcp_sock *msk = mptcp_sk(sk);
27722773

27732774
mptcp_set_state(sk, TCP_CLOSE);
2774-
mptcp_for_each_subflow_safe(msk, subflow, tmp)
2775-
__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
2776-
subflow, MPTCP_CF_FASTCLOSE);
2775+
2776+
/* Explicitly send the fastclose reset as need */
2777+
if (__mptcp_check_fallback(msk))
2778+
return;
2779+
2780+
mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2781+
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2782+
2783+
lock_sock(ssk);
2784+
2785+
/* Some subflow socket states don't allow/need a reset.*/
2786+
if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
2787+
goto unlock;
2788+
2789+
subflow->send_fastclose = 1;
2790+
tcp_send_active_reset(ssk, ssk->sk_allocation,
2791+
SK_RST_REASON_TCP_ABORT_ON_CLOSE);
2792+
unlock:
2793+
release_sock(ssk);
2794+
}
27772795
}
27782796

27792797
static void mptcp_worker(struct work_struct *work)
@@ -2800,7 +2818,11 @@ static void mptcp_worker(struct work_struct *work)
28002818
__mptcp_close_subflow(sk);
28012819

28022820
if (mptcp_close_tout_expired(sk)) {
2821+
struct mptcp_subflow_context *subflow, *tmp;
2822+
28032823
mptcp_do_fastclose(sk);
2824+
mptcp_for_each_subflow_safe(msk, subflow, tmp)
2825+
__mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
28042826
mptcp_close_wake_up(sk);
28052827
}
28062828

@@ -3225,7 +3247,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
32253247
/* msk->subflow is still intact, the following will not free the first
32263248
* subflow
32273249
*/
3228-
mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
3250+
mptcp_do_fastclose(sk);
3251+
mptcp_destroy_common(msk);
32293252

32303253
/* The first subflow is already in TCP_CLOSE status, the following
32313254
* can't overlap with a fallback anymore
@@ -3404,7 +3427,7 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
34043427
msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
34053428
}
34063429

3407-
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
3430+
void mptcp_destroy_common(struct mptcp_sock *msk)
34083431
{
34093432
struct mptcp_subflow_context *subflow, *tmp;
34103433
struct sock *sk = (struct sock *)msk;
@@ -3413,7 +3436,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
34133436

34143437
/* join list will be eventually flushed (with rst) at sock lock release time */
34153438
mptcp_for_each_subflow_safe(msk, subflow, tmp)
3416-
__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
3439+
__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
34173440

34183441
__skb_queue_purge(&sk->sk_receive_queue);
34193442
skb_rbtree_purge(&msk->out_of_order_queue);
@@ -3431,7 +3454,7 @@ static void mptcp_destroy(struct sock *sk)
34313454

34323455
/* allow the following to close even the initial subflow */
34333456
msk->free_first = 1;
3434-
mptcp_destroy_common(msk, 0);
3457+
mptcp_destroy_common(msk);
34353458
sk_sockets_allocated_dec(sk);
34363459
}
34373460

net/mptcp/protocol.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -509,6 +509,7 @@ struct mptcp_subflow_context {
509509
u64 remote_key;
510510
u64 idsn;
511511
u64 map_seq;
512+
u64 rcv_wnd_sent;
512513
u32 snd_isn;
513514
u32 token;
514515
u32 rel_write_seq;
@@ -976,7 +977,7 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
976977
local_bh_enable();
977978
}
978979

979-
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
980+
void mptcp_destroy_common(struct mptcp_sock *msk);
980981

981982
#define MPTCP_TOKEN_MAX_RETRIES 4
982983

0 commit comments

Comments
 (0)