tcp: avoid possible arithmetic overflows
author Eric Dumazet <edumazet@google.com>
Mon, 22 Sep 2014 20:19:44 +0000 (13:19 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 22 Sep 2014 20:27:10 +0000 (16:27 -0400)
icsk_rto is a 32-bit field, and icsk_backoff can reach 15 by default,
or more if some sysctls (e.g. tcp_retries2) are changed.

Better to use 64-bit arithmetic to perform the icsk_rto << icsk_backoff
operation.

As Joe Perches suggested, add a helper for this.

Yuchung spotted the tcp_v4_err() case.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
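
For illustration: because icsk_rto is a 32-bit field, the old expression
icsk_rto << icsk_backoff is evaluated in 32-bit arithmetic and can wrap.
With HZ=1000, an icsk_rto of 65536 jiffies (~65 s, below TCP_RTO_MAX)
shifted by a backoff of 16 (reachable once tcp_retries2 is raised past the
default 15) wraps to exactly 0, so min(icsk_rto << icsk_backoff, TCP_RTO_MAX)
arms an immediate timer instead of clamping to TCP_RTO_MAX. Below is a
minimal userspace sketch of the wraparound and of the 64-bit helper's
behaviour; struct icsk_stub, rto_backoff() and the constant definitions are
illustrative stand-ins for the kernel code, not kernel API, and HZ=1000 is
an assumption.

#include <stdio.h>
#include <stdint.h>

#define HZ          1000UL           /* assumed tick rate */
#define TCP_RTO_MAX (120UL * HZ)     /* 120 s expressed in jiffies */

/* Stand-in for the two inet_connection_sock fields involved. */
struct icsk_stub {
	uint32_t icsk_rto;           /* 32-bit, like icsk_rto */
	uint8_t  icsk_backoff;
};

/* Same shape as the new inet_csk_rto_backoff() helper: widen to
 * 64 bits before shifting, then clamp to max_when.
 */
static unsigned long rto_backoff(const struct icsk_stub *icsk,
				 unsigned long max_when)
{
	uint64_t when = (uint64_t)icsk->icsk_rto << icsk->icsk_backoff;

	return when > max_when ? max_when : (unsigned long)when;
}

int main(void)
{
	struct icsk_stub icsk = { .icsk_rto = 65536, .icsk_backoff = 16 };

	/* 32-bit shift wraps: 65536 << 16 == 2^32, which truncates to 0. */
	printf("32-bit shift:  %u\n",
	       (unsigned)(icsk.icsk_rto << icsk.icsk_backoff));

	/* 64-bit helper clamps instead of wrapping. */
	printf("64-bit helper: %lu\n", rto_backoff(&icsk, TCP_RTO_MAX));
	return 0;
}

Expected output: 0 for the 32-bit shift and 120000 (TCP_RTO_MAX) for the
helper, matching the clamping behaviour the patch introduces.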
include/net/inet_connection_sock.h
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c

index 5fbe6568c3cff7b025d4957cc28df4b77f051dc6..848e85cb5c6128ecfe101e386657ce355f507b5c 100644 (file)
@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
 #endif
 }
 
+static inline unsigned long
+inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
+                    unsigned long max_when)
+{
+        u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
+
+        return (unsigned long)min_t(u64, when, max_when);
+}
+
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
 struct request_sock *inet_csk_search_req(const struct sock *sk,
index 02fb66d4a0188568096bcf785ca448627a589c01..13f3da4762e3a857ba1e42de324f1fd32fa5017b 100644 (file)
@@ -3208,9 +3208,10 @@ static void tcp_ack_probe(struct sock *sk)
                 * This function is not for random using!
                 */
        } else {
+               unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
-                                         TCP_RTO_MAX);
+                                         when, TCP_RTO_MAX);
        }
 }
 
index 006b045716d87495d51363236825435e54657b10..3b2e49cb2b61ffcef110cde31fce6d62242f8fdb 100644 (file)
@@ -430,9 +430,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                        break;
 
                icsk->icsk_backoff--;
-               inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
-                       TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
-               tcp_bound_rto(sk);
+               icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+                                              TCP_TIMEOUT_INIT;
+               icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
                skb = tcp_write_queue_head(sk);
                BUG_ON(!skb);
index 7f1280dcad579315b393b0e2c7953fa05d0e3cf0..8c61a7c0c88961ff07db6b12949b5befb09f7318 100644 (file)
@@ -3279,6 +3279,7 @@ void tcp_send_probe0(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       unsigned long probe_max;
        int err;
 
        err = tcp_write_wakeup(sk);
@@ -3294,9 +3295,7 @@ void tcp_send_probe0(struct sock *sk)
                if (icsk->icsk_backoff < sysctl_tcp_retries2)
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
-               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
-                                         TCP_RTO_MAX);
+               probe_max = TCP_RTO_MAX;
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember icsk_probes_out.
@@ -3306,11 +3305,11 @@ void tcp_send_probe0(struct sock *sk)
                 */
                if (!icsk->icsk_probes_out)
                        icsk->icsk_probes_out = 1;
-               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                         min(icsk->icsk_rto << icsk->icsk_backoff,
-                                             TCP_RESOURCE_PROBE_INTERVAL),
-                                         TCP_RTO_MAX);
+               probe_max = TCP_RESOURCE_PROBE_INTERVAL;
        }
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+                                 inet_csk_rto_backoff(icsk, probe_max),
+                                 TCP_RTO_MAX);
 }
 
 int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
index a339e7ba05a434954744b6716a7d57be0400066c..b24360f6e29351476a8931df6659782e1a356f2f 100644 (file)
@@ -180,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk)
 
                retry_until = sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
-                       const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
+                       const int alive = icsk->icsk_rto < TCP_RTO_MAX;
 
                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
@@ -294,7 +294,7 @@ static void tcp_probe_timer(struct sock *sk)
        max_probes = sysctl_tcp_retries2;
 
        if (sock_flag(sk, SOCK_DEAD)) {
-               const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
+               const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
                max_probes = tcp_orphan_retries(sk, alive);