@@ -560,10 +560,9 @@ static bool mptcp_check_data_fin(struct sock *sk)
560560
561561static void mptcp_dss_corruption (struct mptcp_sock * msk , struct sock * ssk )
562562{
563- if (READ_ONCE ( msk -> allow_infinite_fallback )) {
563+ if (mptcp_try_fallback ( ssk )) {
564564 MPTCP_INC_STATS (sock_net (ssk ),
565565 MPTCP_MIB_DSSCORRUPTIONFALLBACK );
566- mptcp_do_fallback (ssk );
567566 } else {
568567 MPTCP_INC_STATS (sock_net (ssk ), MPTCP_MIB_DSSCORRUPTIONRESET );
569568 mptcp_subflow_reset (ssk );
@@ -792,7 +791,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
{
	/* Mark the subflow as fully joined at the MPTCP level: its data
	 * mapping starts at the current msk-level ack sequence.
	 */
	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);

	/* Once an additional subflow has joined, infinite-map fallback to
	 * plain TCP is no longer allowed.  Plain store (not WRITE_ONCE):
	 * the visible callers (__mptcp_finish_join(), mptcp_finish_join())
	 * invoke this under msk->fallback_lock after checking
	 * msk->allow_subflows -- NOTE(review): assumes every caller holds
	 * fallback_lock; confirm against callers outside this view.
	 */
	msk->allow_infinite_fallback = false;

	/* Emit the SUB_ESTABLISHED netlink event for userspace; GFP_ATOMIC
	 * because this can run in atomic context (spinlock held, BHs off).
	 */
	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}
798797
@@ -803,6 +802,14 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
803802 if (sk -> sk_state != TCP_ESTABLISHED )
804803 return false;
805804
805+ spin_lock_bh (& msk -> fallback_lock );
806+ if (!msk -> allow_subflows ) {
807+ spin_unlock_bh (& msk -> fallback_lock );
808+ return false;
809+ }
810+ mptcp_subflow_joined (msk , ssk );
811+ spin_unlock_bh (& msk -> fallback_lock );
812+
806813 /* attach to msk socket only after we are sure we will deal with it
807814 * at close time
808815 */
@@ -811,7 +818,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
811818
812819 mptcp_subflow_ctx (ssk )-> subflow_id = msk -> subflow_id ++ ;
813820 mptcp_sockopt_sync_locked (msk , ssk );
814- mptcp_subflow_joined (msk , ssk );
815821 mptcp_stop_tout_timer (sk );
816822 __mptcp_propagate_sndbuf (sk , ssk );
817823 return true;
@@ -1136,10 +1142,14 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
11361142 mpext -> infinite_map = 1 ;
11371143 mpext -> data_len = 0 ;
11381144
1145+ if (!mptcp_try_fallback (ssk )) {
1146+ mptcp_subflow_reset (ssk );
1147+ return ;
1148+ }
1149+
11391150 MPTCP_INC_STATS (sock_net (ssk ), MPTCP_MIB_INFINITEMAPTX );
11401151 mptcp_subflow_ctx (ssk )-> send_infinite_map = 0 ;
11411152 pr_fallback (msk );
1142- mptcp_do_fallback (ssk );
11431153}
11441154
11451155#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
@@ -2543,9 +2553,9 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
25432553
25442554static void __mptcp_retrans (struct sock * sk )
25452555{
2556+ struct mptcp_sendmsg_info info = { .data_lock_held = true, };
25462557 struct mptcp_sock * msk = mptcp_sk (sk );
25472558 struct mptcp_subflow_context * subflow ;
2548- struct mptcp_sendmsg_info info = {};
25492559 struct mptcp_data_frag * dfrag ;
25502560 struct sock * ssk ;
25512561 int ret , err ;
@@ -2590,6 +2600,18 @@ static void __mptcp_retrans(struct sock *sk)
25902600 info .sent = 0 ;
25912601 info .limit = READ_ONCE (msk -> csum_enabled ) ? dfrag -> data_len :
25922602 dfrag -> already_sent ;
2603+
2604+ /*
2605+ * make the whole retrans decision, xmit, disallow
2606+ * fallback atomic
2607+ */
2608+ spin_lock_bh (& msk -> fallback_lock );
2609+ if (__mptcp_check_fallback (msk )) {
2610+ spin_unlock_bh (& msk -> fallback_lock );
2611+ release_sock (ssk );
2612+ return ;
2613+ }
2614+
25932615 while (info .sent < info .limit ) {
25942616 ret = mptcp_sendmsg_frag (sk , ssk , dfrag , & info );
25952617 if (ret <= 0 )
@@ -2603,8 +2625,9 @@ static void __mptcp_retrans(struct sock *sk)
26032625 len = max (copied , len );
26042626 tcp_push (ssk , 0 , info .mss_now , tcp_sk (ssk )-> nonagle ,
26052627 info .size_goal );
2606- WRITE_ONCE ( msk -> allow_infinite_fallback , false) ;
2628+ msk -> allow_infinite_fallback = false;
26072629 }
2630+ spin_unlock_bh (& msk -> fallback_lock );
26082631
26092632 release_sock (ssk );
26102633 }
@@ -2730,14 +2753,16 @@ static void __mptcp_init_sock(struct sock *sk)
27302753 WRITE_ONCE (msk -> first , NULL );
27312754 inet_csk (sk )-> icsk_sync_mss = mptcp_sync_mss ;
27322755 WRITE_ONCE (msk -> csum_enabled , mptcp_is_checksum_enabled (sock_net (sk )));
2733- WRITE_ONCE (msk -> allow_infinite_fallback , true);
2756+ msk -> allow_infinite_fallback = true;
2757+ msk -> allow_subflows = true;
27342758 msk -> recovery = false;
27352759 msk -> subflow_id = 1 ;
27362760 msk -> last_data_sent = tcp_jiffies32 ;
27372761 msk -> last_data_recv = tcp_jiffies32 ;
27382762 msk -> last_ack_recv = tcp_jiffies32 ;
27392763
27402764 mptcp_pm_data_init (msk );
2765+ spin_lock_init (& msk -> fallback_lock );
27412766
27422767 /* re-use the csk retrans timer for MPTCP-level retrans */
27432768 timer_setup (& msk -> sk .icsk_retransmit_timer , mptcp_retransmit_timer , 0 );
@@ -3117,7 +3142,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
31173142 * subflow
31183143 */
31193144 mptcp_destroy_common (msk , MPTCP_CF_FASTCLOSE );
3145+
3146+ /* The first subflow is already in TCP_CLOSE status, the following
3147+ * can't overlap with a fallback anymore
3148+ */
3149+ spin_lock_bh (& msk -> fallback_lock );
3150+ msk -> allow_subflows = true;
3151+ msk -> allow_infinite_fallback = true;
31203152 WRITE_ONCE (msk -> flags , 0 );
3153+ spin_unlock_bh (& msk -> fallback_lock );
3154+
31213155 msk -> cb_flags = 0 ;
31223156 msk -> recovery = false;
31233157 WRITE_ONCE (msk -> can_ack , false);
@@ -3524,7 +3558,13 @@ bool mptcp_finish_join(struct sock *ssk)
35243558
35253559 /* active subflow, already present inside the conn_list */
35263560 if (!list_empty (& subflow -> node )) {
3561+ spin_lock_bh (& msk -> fallback_lock );
3562+ if (!msk -> allow_subflows ) {
3563+ spin_unlock_bh (& msk -> fallback_lock );
3564+ return false;
3565+ }
35273566 mptcp_subflow_joined (msk , ssk );
3567+ spin_unlock_bh (& msk -> fallback_lock );
35283568 mptcp_propagate_sndbuf (parent , ssk );
35293569 return true;
35303570 }
0 commit comments