Skip to content

Commit 5fac748

Browse files
committed
tcp: add the ability to control max RTO
JIRA: https://issues.redhat.com/browse/RHEL-115393 Upstream Status: linux.git Conflicts: - Context differences due to missing upstream commit 54b771e ("doc: net: Fix .rst rendering of net_cachelines pages") in c10s. commit 54a378f Author: Eric Dumazet <edumazet@google.com> Date: Fri Feb 7 15:28:29 2025 +0000 tcp: add the ability to control max RTO Currently, TCP stack uses a constant (120 seconds) to limit the RTO value exponential growth. Some applications want to set a lower value. Add TCP_RTO_MAX_MS socket option to set a value (in ms) between 1 and 120 seconds. It is discouraged to change the socket rto max on a live socket, as it might lead to unexpected disconnects. Following patch is adding a netns sysctl to control the default value at socket creation time. Signed-off-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Jason Xing <kerneljasonxing@gmail.com> Reviewed-by: Neal Cardwell <ncardwell@google.com> Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com> Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Antoine Tenart <atenart@redhat.com>
1 parent 79df165 commit 5fac748

File tree

9 files changed

+39
-17
lines changed

9 files changed

+39
-17
lines changed

Documentation/networking/net_cachelines/inet_connection_sock.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ struct_timer_list icsk_retransmit_timer read_mostly -
1616
struct_timer_list icsk_delack_timer read_mostly - inet_csk_reset_xmit_timer,tcp_connect
1717
u32 icsk_rto read_write - tcp_cwnd_validate,tcp_schedule_loss_probe,tcp_connect_init,tcp_connect,tcp_write_xmit,tcp_push_one
1818
u32 icsk_rto_min - -
19+
u32 icsk_rto_max read_mostly - tcp_reset_xmit_timer
1920
u32 icsk_delack_max - -
2021
u32 icsk_pmtu_cookie read_write - tcp_sync_mss,tcp_current_mss,tcp_send_syn_data,tcp_connect_init,tcp_connect
2122
struct_tcp_congestion_ops icsk_ca_ops read_write - tcp_cwnd_validate,tcp_tso_segs,tcp_ca_dst_init,tcp_connect_init,tcp_connect,tcp_write_xmit

include/net/inet_connection_sock.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ struct inet_connection_sock {
9090
struct timer_list icsk_delack_timer;
9191
__u32 icsk_rto;
9292
__u32 icsk_rto_min;
93+
u32 icsk_rto_max;
9394
__u32 icsk_delack_max;
9495
__u32 icsk_pmtu_cookie;
9596
const struct tcp_congestion_ops *icsk_ca_ops;

include/net/tcp.h

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -144,8 +144,9 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
144144
#define TCP_DELACK_MIN 4U
145145
#define TCP_ATO_MIN 4U
146146
#endif
147-
#define TCP_RTO_MAX ((unsigned)(120*HZ))
148-
#define TCP_RTO_MIN ((unsigned)(HZ/5))
147+
#define TCP_RTO_MAX_SEC 120
148+
#define TCP_RTO_MAX ((unsigned)(TCP_RTO_MAX_SEC * HZ))
149+
#define TCP_RTO_MIN ((unsigned)(HZ / 5))
149150
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
150151

151152
#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
@@ -754,10 +755,14 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu);
754755
int tcp_mss_to_mtu(struct sock *sk, int mss);
755756
void tcp_mtup_init(struct sock *sk);
756757

758+
static inline unsigned int tcp_rto_max(const struct sock *sk)
759+
{
760+
return READ_ONCE(inet_csk(sk)->icsk_rto_max);
761+
}
762+
757763
static inline void tcp_bound_rto(struct sock *sk)
758764
{
759-
if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
760-
inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
765+
inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
761766
}
762767

763768
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
@@ -1444,7 +1449,8 @@ static inline void tcp_reset_xmit_timer(struct sock *sk,
14441449
{
14451450
if (pace_delay)
14461451
when += tcp_pacing_delay(sk);
1447-
inet_csk_reset_xmit_timer(sk, what, when, TCP_RTO_MAX);
1452+
inet_csk_reset_xmit_timer(sk, what, when,
1453+
tcp_rto_max(sk));
14481454
}
14491455

14501456
/* Something is really bad, we could not queue an additional packet,

include/uapi/linux/tcp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,7 @@ enum {
136136
#define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */
137137

138138
#define TCP_IS_MPTCP 43 /* Is MPTCP being used? */
139+
#define TCP_RTO_MAX_MS 44 /* max rto time in ms */
139140

140141
#define TCP_REPAIR_ON 1
141142
#define TCP_REPAIR_OFF 0

net/ipv4/tcp.c

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -432,6 +432,10 @@ void tcp_init_sock(struct sock *sk)
432432
INIT_LIST_HEAD(&tp->tsorted_sent_queue);
433433

434434
icsk->icsk_rto = TCP_TIMEOUT_INIT;
435+
436+
/* Use a sysctl ? */
437+
icsk->icsk_rto_max = TCP_RTO_MAX;
438+
435439
rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
436440
icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
437441
icsk->icsk_delack_max = TCP_DELACK_MAX;
@@ -3800,6 +3804,11 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
38003804
secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
38013805
TCP_RTO_MAX / HZ));
38023806
return 0;
3807+
case TCP_RTO_MAX_MS:
3808+
if (val < MSEC_PER_SEC || val > TCP_RTO_MAX_SEC * MSEC_PER_SEC)
3809+
return -EINVAL;
3810+
WRITE_ONCE(inet_csk(sk)->icsk_rto_max, msecs_to_jiffies(val));
3811+
return 0;
38033812
}
38043813

38053814
sockopt_lock_sock(sk);
@@ -4636,6 +4645,9 @@ int do_tcp_getsockopt(struct sock *sk, int level,
46364645
case TCP_IS_MPTCP:
46374646
val = 0;
46384647
break;
4648+
case TCP_RTO_MAX_MS:
4649+
val = jiffies_to_msecs(tcp_rto_max(sk));
4650+
break;
46394651
default:
46404652
return -ENOPROTOOPT;
46414653
}

net/ipv4/tcp_input.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3566,7 +3566,7 @@ static void tcp_ack_probe(struct sock *sk)
35663566
* This function is not for random using!
35673567
*/
35683568
} else {
3569-
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
3569+
unsigned long when = tcp_probe0_when(sk, tcp_rto_max(sk));
35703570

35713571
when = tcp_clamp_probe0_to_user_timeout(sk, when);
35723572
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, true);

net/ipv4/tcp_ipv4.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -456,7 +456,7 @@ void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
456456

457457
icsk->icsk_backoff--;
458458
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
459-
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
459+
icsk->icsk_rto = inet_csk_rto_backoff(icsk, tcp_rto_max(sk));
460460

461461
tcp_mstamp_refresh(tp);
462462
delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));

net/ipv4/tcp_output.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4252,7 +4252,7 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
42524252
unsigned long delay;
42534253

42544254
delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
4255-
if (delay < TCP_RTO_MAX)
4255+
if (delay < tcp_rto_max(sk))
42564256
icsk->icsk_ack.retry++;
42574257
inet_csk_schedule_ack(sk);
42584258
icsk->icsk_ack.ato = TCP_ATO_MIN;
@@ -4392,7 +4392,7 @@ void tcp_send_probe0(struct sock *sk)
43924392
if (err <= 0) {
43934393
if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
43944394
icsk->icsk_backoff++;
4395-
timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
4395+
timeout = tcp_probe0_when(sk, tcp_rto_max(sk));
43964396
} else {
43974397
/* If packet was not sent due to local congestion,
43984398
* Let senders fight for local resources conservatively.

net/ipv4/tcp_timer.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
109109

110110
/* If peer does not open window for long time, or did not transmit
111111
* anything for long time, penalize it. */
112-
if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
112+
if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*tcp_rto_max(sk) || !do_reset)
113113
shift++;
114114

115115
/* If some dubious ICMP arrived, penalize even more. */
@@ -189,12 +189,12 @@ static unsigned int tcp_model_timeout(struct sock *sk,
189189
{
190190
unsigned int linear_backoff_thresh, timeout;
191191

192-
linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
192+
linear_backoff_thresh = ilog2(tcp_rto_max(sk) / rto_base);
193193
if (boundary <= linear_backoff_thresh)
194194
timeout = ((2 << boundary) - 1) * rto_base;
195195
else
196196
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
197-
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
197+
(boundary - linear_backoff_thresh) * tcp_rto_max(sk);
198198
return jiffies_to_msecs(timeout);
199199
}
200200
/**
@@ -268,7 +268,7 @@ static int tcp_write_timeout(struct sock *sk)
268268

269269
retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
270270
if (sock_flag(sk, SOCK_DEAD)) {
271-
const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
271+
const bool alive = icsk->icsk_rto < tcp_rto_max(sk);
272272

273273
retry_until = tcp_orphan_retries(sk, alive);
274274
do_reset = alive ||
@@ -407,7 +407,8 @@ static void tcp_probe_timer(struct sock *sk)
407407
}
408408
max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
409409
if (sock_flag(sk, SOCK_DEAD)) {
410-
const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
410+
unsigned int rto_max = tcp_rto_max(sk);
411+
const bool alive = inet_csk_rto_backoff(icsk, rto_max) < rto_max;
411412

412413
max_probes = tcp_orphan_retries(sk, alive);
413414
if (!alive && icsk->icsk_backoff >= max_probes)
@@ -483,7 +484,7 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
483484
const struct inet_connection_sock *icsk = inet_csk(sk);
484485
u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
485486
const struct tcp_sock *tp = tcp_sk(sk);
486-
int timeout = TCP_RTO_MAX * 2;
487+
int timeout = tcp_rto_max(sk) * 2;
487488
s32 rcv_delta;
488489

489490
if (user_timeout) {
@@ -656,15 +657,15 @@ void tcp_retransmit_timer(struct sock *sk)
656657
icsk->icsk_backoff = 0;
657658
icsk->icsk_rto = clamp(__tcp_set_rto(tp),
658659
tcp_rto_min(sk),
659-
TCP_RTO_MAX);
660+
tcp_rto_max(sk));
660661
} else if (sk->sk_state != TCP_SYN_SENT ||
661662
tp->total_rto >
662663
READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
663664
/* Use normal (exponential) backoff unless linear timeouts are
664665
* activated.
665666
*/
666667
icsk->icsk_backoff++;
667-
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
668+
icsk->icsk_rto = min(icsk->icsk_rto << 1, tcp_rto_max(sk));
668669
}
669670
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
670671
tcp_clamp_rto_to_user_timeout(sk), false);

0 commit comments

Comments (0)