Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5c08ea2..ca40615 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
 #include <linux/smp_lock.h>
 
 /* People can turn this off for buggy TCPs found in printers etc. */
-int sysctl_tcp_retrans_collapse = 1;
+int sysctl_tcp_retrans_collapse __read_mostly = 1;
 
 /* People can turn this on to work with those rare, broken TCPs that
  * interpret the window field as a signed quantity.
  */
-int sysctl_tcp_workaround_signed_windows = 0;
+int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
 
 /* This limits the percentage of the congestion window which we
  * will allow a single TSO frame to consume.  Building TSO frames
  * which are too large can cause TCP streams to be bursty.
  */
-int sysctl_tcp_tso_win_divisor = 3;
+int sysctl_tcp_tso_win_divisor __read_mostly = 3;
 
-int sysctl_tcp_mtu_probing = 0;
-int sysctl_tcp_base_mss = 512;
+int sysctl_tcp_mtu_probing __read_mostly = 0;
+int sysctl_tcp_base_mss __read_mostly = 512;
 
 /* By default, RFC2861 behavior.  */
-int sysctl_tcp_slow_start_after_idle = 1;
+int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
                             struct sk_buff *skb)
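
The __read_mostly annotations above move these sysctls into a linker section reserved for rarely-written data, so that write-hot variables do not end up sharing (and bouncing) cache lines with them. A minimal user-space sketch of the mechanism follows; the macro and section names are illustrative, the kernel's real definition lives in <linux/cache.h>:

    /* Sketch: a __read_mostly-style annotation is just a section
     * attribute; the linker then groups these rarely-written variables
     * together, away from write-hot data.
     */
    #define my_read_mostly __attribute__((__section__(".data.read_mostly")))

    int tunable_collapse my_read_mostly = 1;   /* read per packet, written almost never */
    int tunable_base_mss my_read_mostly = 512;

    int hot_counter;                           /* dirtied constantly; placed elsewhere */

    int main(void) { return 0; }
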
@@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
                 * See RFC1323 for an explanation of the limit to 14 
                 */
                space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+               space = min_t(u32, space, *window_clamp);
                while (space > 65535 && (*rcv_wscale) < 14) {
                        space >>= 1;
                        (*rcv_wscale)++;
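
The added min_t() clamps the space used for scale selection to *window_clamp, so a socket with a small clamp no longer advertises a larger window scale than it can ever use. A standalone sketch of the selection loop, with illustrative names and values:

    #include <stdio.h>

    /* Sketch of the scale selection above: grow rcv_wscale until the
     * (now clamped) buffer space fits the 16-bit window field.
     */
    static unsigned int pick_rcv_wscale(unsigned int space, unsigned int window_clamp)
    {
        unsigned int wscale = 0;

        if (space > window_clamp)               /* the min_t() clamp added above */
            space = window_clamp;
        while (space > 65535 && wscale < 14) {  /* RFC 1323 caps the shift at 14 */
            space >>= 1;
            wscale++;
        }
        return wscale;
    }

    int main(void)
    {
        /* A 4 MB rmem limit but a 256 KB clamp: the clamp now wins. */
        printf("%u\n", pick_rcv_wscale(4u << 20, 256u << 10));  /* prints 3, not 7 */
        return 0;
    }
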
@@ -268,14 +269,14 @@ static u16 tcp_select_window(struct sock *sk)
        return new_win;
 }
 
-static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
+static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
                                         __u32 tstamp)
 {
        if (tp->rx_opt.tstamp_ok) {
-               *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
-                                         (TCPOPT_NOP << 16) |
-                                         (TCPOPT_TIMESTAMP << 8) |
-                                         TCPOLEN_TIMESTAMP);
+               *ptr++ = htonl((TCPOPT_NOP << 24) |
+                              (TCPOPT_NOP << 16) |
+                              (TCPOPT_TIMESTAMP << 8) |
+                              TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);
                *ptr++ = htonl(tp->rx_opt.ts_recent);
        }
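
With the pointer retyped to __be32 *, each htonl()-packed word lands on the wire most-significant byte first, which is exactly the option order RFC 1323 prescribes: two NOPs for alignment, then kind 8 and length 10. The __constant_htonl() calls could be dropped because the byte-order helpers already constant-fold literal arguments. A small sketch of the resulting byte layout:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Sketch: the packed timestamp-option word above, using the real TCP
     * option constants (NOP = 1, TIMESTAMP = 8, TCPOLEN_TIMESTAMP = 10).
     */
    int main(void)
    {
        uint32_t w = htonl((1 << 24) | (1 << 16) | (8 << 8) | 10);
        const unsigned char *b = (const unsigned char *)&w;

        /* Prints "01 01 08 0a" on any host: NOP, NOP, kind, length. */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
    }
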
@@ -304,7 +305,7 @@ static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
  * MAX_SYN_SIZE to match the new maximum number of options that you
  * can generate.
  */
-static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
+static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
                                  int offer_wscale, int wscale, __u32 tstamp,
                                  __u32 ts_recent)
 {
@@ -324,18 +325,27 @@ static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
        *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
        if (ts) {
                if(sack)
-                       *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
-                                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+                       *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
+                                      (TCPOLEN_SACK_PERM << 16) |
+                                      (TCPOPT_TIMESTAMP << 8) |
+                                      TCPOLEN_TIMESTAMP);
                else
-                       *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-                                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+                       *ptr++ = htonl((TCPOPT_NOP << 24) |
+                                      (TCPOPT_NOP << 16) |
+                                      (TCPOPT_TIMESTAMP << 8) |
+                                      TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);         /* TSVAL */
                *ptr++ = htonl(ts_recent);      /* TSECR */
        } else if(sack)
-               *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-                                         (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
+               *ptr++ = htonl((TCPOPT_NOP << 24) |
+                              (TCPOPT_NOP << 16) |
+                              (TCPOPT_SACK_PERM << 8) |
+                              TCPOLEN_SACK_PERM);
        if (offer_wscale)
-               *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
+               *ptr++ = htonl((TCPOPT_NOP << 24) |
+                              (TCPOPT_WINDOW << 16) |
+                              (TCPOLEN_WINDOW << 8) |
+                              (wscale));
 }
 
 /* This routine actually transmits TCP packets queued in by
@@ -423,7 +433,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        th->dest                = inet->dport;
        th->seq                 = htonl(tcb->seq);
        th->ack_seq             = htonl(tp->rcv_nxt);
-       *(((__u16 *)th) + 6)    = htons(((tcp_header_size >> 2) << 12) |
+       *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->flags);
 
        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
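
The store through ((__be16 *)th) + 6 writes the TCP header's combined data-offset/reserved/flags word: header length in 32-bit words in the top four bits, flag bits in the low byte; the new cast records that the stored value is big-endian. A worked sketch of the packing, using an example 32-byte header and the ACK flag:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* Sketch of the packing above: a 32-byte header (doff = 32/4 = 8)
         * carrying only ACK (0x10) gives (8 << 12) | 0x10 = 0x8010.
         */
        unsigned int tcp_header_size = 32;      /* example: 20 + 12 bytes of options */
        uint16_t flags = 0x10;                  /* TCPCB_FLAG_ACK */
        uint16_t word = htons(((tcp_header_size >> 2) << 12) | flags);

        printf("0x%04x\n", ntohs(word));        /* 0x8010 */
        return 0;
    }
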
@@ -444,7 +454,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        }
 
        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
-               tcp_syn_build_options((__u32 *)(th + 1),
+               tcp_syn_build_options((__be32 *)(th + 1),
                                      tcp_advertise_mss(sk),
                                      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
                                      (sysctl_flags & SYSCTL_FLAG_SACK),
@@ -453,7 +463,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                      tcb->when,
                                      tp->rx_opt.ts_recent);
        } else {
-               tcp_build_and_update_options((__u32 *)(th + 1),
+               tcp_build_and_update_options((__be32 *)(th + 1),
                                             tp, tcb->when);
                TCP_ECN_send(sk, tp, skb, tcp_header_size);
        }
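
The __u32 * to __be32 * conversions in this and the surrounding hunks change nothing at runtime; they feed sparse, whose bitwise attribute turns __be32 into a distinct restricted type so that mixing host-order and network-order values without htonl()/ntohl() draws a warning. A simplified sketch of the annotation, with illustrative names (the kernel's typedefs live in <linux/types.h>):

    /* Under sparse (__CHECKER__ defined), the bitwise attribute makes the
     * typedef a distinct restricted type; for gcc proper it compiles away.
     */
    #ifdef __CHECKER__
    #define my_bitwise __attribute__((bitwise))
    #else
    #define my_bitwise
    #endif

    typedef unsigned int my_u32;
    typedef my_u32 my_bitwise my_be32;

    my_be32 wire_word;              /* zero init is allowed even under sparse */
    /* my_be32 bad = 42; */         /* sparse would warn: host-order into be32 */

    int main(void) { return 0; }
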
@@ -466,7 +476,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        if (skb->len != tcp_header_size)
                tcp_event_data_sent(tp, skb, sk);
 
-       TCP_INC_STATS(TCP_MIB_OUTSEGS);
+       if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
+               TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
        err = icsk->icsk_af_ops->queue_xmit(skb, 0);
        if (likely(err <= 0))
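
The reworked counter bumps tcpOutSegs only for segments carrying data beyond snd_nxt or carrying no data at all (seq == end_seq, i.e. pure ACKs); segments made entirely of already-sent data are left to the retransmission counters, matching the MIB definition that excludes retransmitted octets. A sketch of the test with a wrap-safe stand-in for the kernel's after():

    #include <stdio.h>
    #include <stdint.h>

    /* seq_after() mimics the kernel's wrap-safe after() comparison in
     * 32-bit sequence space; counted() is the new tcpOutSegs condition.
     */
    static int seq_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    static int counted(uint32_t seq, uint32_t end_seq, uint32_t snd_nxt)
    {
        return seq_after(end_seq, snd_nxt) || seq == end_seq;
    }

    int main(void)
    {
        printf("%d\n", counted(1000, 1100, 1000));  /* new data: 1 */
        printf("%d\n", counted(800, 900, 1000));    /* retransmission: 0 */
        printf("%d\n", counted(1000, 1000, 1000));  /* pure ACK: 1 */
        return 0;
    }
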
@@ -575,7 +586,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
        TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
 
-       if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
+       if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Copy and checksum data tail into the new buffer. */
                buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
                                                       nsize, 0);
@@ -584,7 +595,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 
                skb->csum = csum_block_sub(skb->csum, buff->csum, len);
        } else {
-               skb->ip_summed = CHECKSUM_HW;
+               skb->ip_summed = CHECKSUM_PARTIAL;
                skb_split(skb, buff, len);
        }
 
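On this path CHECKSUM_HW is renamed CHECKSUM_PARTIAL: the device will finish the checksum, so only the software branch has to keep skb->csum consistent, subtracting the tail's sum from the head with csum_block_sub() (the collapse path below adds one back with csum_block_add()). That bookkeeping works because the Internet checksum is a folded one's-complement sum, so the sum of a buffer equals its parts' sums combined. A self-contained sketch of that identity, with a simplified stand-in for csum_partial():

    #include <stdio.h>
    #include <stdint.h>

    /* Folded one's-complement sum of a byte buffer: a simplified stand-in
     * for the kernel's csum_partial().  'sum' chains a previous result in.
     */
    static uint32_t csum(const uint8_t *p, int len, uint32_t sum)
    {
        while (len > 1) {
            sum += ((uint32_t)p[0] << 8) | p[1];
            p += 2;
            len -= 2;
        }
        if (len)
            sum += (uint32_t)p[0] << 8;
        while (sum >> 16)                        /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }

    int main(void)
    {
        uint8_t buf[64];
        int i;

        for (i = 0; i < 64; i++)
            buf[i] = (uint8_t)(i * 7 + 3);

        /* Sum of the whole buffer equals the head's sum with the tail's
         * sum folded in (split at an even offset): the identity that
         * csum_block_sub()/csum_block_add() rely on.
         */
        uint32_t whole = csum(buf, 64, 0);
        uint32_t parts = csum(buf + 40, 24, csum(buf, 40, 0));
        printf("%04x %04x\n", whole, parts);    /* identical */
        return 0;
    }
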
@@ -687,7 +698,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
                __pskb_trim_head(skb, len - skb_headlen(skb));
 
        TCP_SKB_CB(skb)->seq += len;
-       skb->ip_summed = CHECKSUM_HW;
+       skb->ip_summed = CHECKSUM_PARTIAL;
 
        skb->truesize        -= len;
        sk->sk_wmem_queued   -= len;
@@ -1060,7 +1071,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
        /* This packet was never sent out yet, so no SACK bits. */
        TCP_SKB_CB(buff)->sacked = 0;
 
-       buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
+       buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
        skb_split(skb, buff, len);
 
        /* Fix up tso_factor for both original and new SKB.  */
@@ -1085,10 +1096,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
        u32 send_win, cong_win, limit, in_flight;
 
        if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
-               return 0;
+               goto send_now;
 
        if (icsk->icsk_ca_state != TCP_CA_Open)
-               return 0;
+               goto send_now;
+
+       /* Defer for less than two clock ticks. */
+       if (tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
+               goto send_now;
 
        in_flight = tcp_packets_in_flight(tp);
 
@@ -1104,7 +1119,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 
        /* If a full-sized TSO skb can be sent, do it. */
        if (limit >= 65536)
-               return 0;
+               goto send_now;
 
        if (sysctl_tcp_tso_win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
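
The win_divisor branch here stops deferring once the sendable amount reaches min(snd_wnd, snd_cwnd * mss) divided by the sysctl (a third by default), so a single TSO frame cannot swallow the whole window. A sketch with illustrative numbers:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the win_divisor branch above: stop deferring once the
     * sendable amount reaches 1/divisor of the current window.
     */
    int main(void)
    {
        uint32_t mss = 1460, snd_cwnd = 40, snd_wnd = 65535;
        uint32_t divisor = 3;                   /* sysctl_tcp_tso_win_divisor default */
        uint32_t limit = 20 * mss;              /* what the window lets us send now */

        uint32_t chunk = snd_wnd < snd_cwnd * mss ? snd_wnd : snd_cwnd * mss;
        chunk /= divisor;

        printf("%s\n", limit >= chunk ? "send now" : "defer");  /* "send now" */
        return 0;
    }
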
@@ -1114,7 +1129,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
                 */
                chunk /= sysctl_tcp_tso_win_divisor;
                if (limit >= chunk)
-                       return 0;
+                       goto send_now;
        } else {
                /* Different approach, try not to defer past a single
                 * ACK.  Receiver should ACK every other full sized
@@ -1122,11 +1137,17 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
                 * then send now.
                 */
                if (limit > tcp_max_burst(tp) * tp->mss_cache)
-                       return 0;
+                       goto send_now;
        }
 
        /* Ok, it looks like it is advisable to defer.  */
+       tp->tso_deferred = 1 | (jiffies<<1);
+
        return 1;
+
+send_now:
+       tp->tso_deferred = 0;
+       return 0;
 }
 
 /* Create a new MTU probe if we are ready.
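
One detail of the deferral bound added above: tp->tso_deferred packs a flag and a timestamp into one word. Bit 0 set means a deferral is in progress, and bits 31..1 hold the low 31 bits of jiffies when it began, so ((jiffies<<1)>>1) - (tp->tso_deferred>>1) recovers the elapsed ticks and zero can safely mean "not deferring". A user-space sketch of the pack/unpack, with illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the tso_deferred encoding: bit 0 is the "deferral in
     * progress" flag, bits 31..1 the low 31 bits of jiffies at start.
     */
    static uint32_t defer_start(uint32_t jiffies)
    {
        return 1u | (jiffies << 1);             /* non-zero by construction */
    }

    static uint32_t ticks_deferred(uint32_t jiffies, uint32_t deferred)
    {
        return ((jiffies << 1) >> 1) - (deferred >> 1); /* elapsed, mod 2^31 */
    }

    int main(void)
    {
        uint32_t stamp = defer_start(1000);

        printf("%u\n", ticks_deferred(1001, stamp));    /* 1: within the bound */
        printf("%u\n", ticks_deferred(1002, stamp));    /* 2: > 1, so send now */
        return 0;
    }
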
@@ -1204,8 +1225,7 @@ static int tcp_mtu_probe(struct sock *sk)
        TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
        TCP_SKB_CB(nskb)->sacked = 0;
        nskb->csum = 0;
-       if (skb->ip_summed == CHECKSUM_HW)
-               nskb->ip_summed = CHECKSUM_HW;
+       nskb->ip_summed = skb->ip_summed;
 
        len = 0;
        while (len < probe_size) {
@@ -1229,7 +1249,7 @@ static int tcp_mtu_probe(struct sock *sk)
                                                   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
                        if (!skb_shinfo(skb)->nr_frags) {
                                skb_pull(skb, copy);
-                               if (skb->ip_summed != CHECKSUM_HW)
+                               if (skb->ip_summed != CHECKSUM_PARTIAL)
                                        skb->csum = csum_partial(skb->data, skb->len, 0);
                        } else {
                                __pskb_trim_head(skb, copy);
@@ -1570,10 +1590,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 
                memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
-               if (next_skb->ip_summed == CHECKSUM_HW)
-                       skb->ip_summed = CHECKSUM_HW;
+               skb->ip_summed = next_skb->ip_summed;
 
-               if (skb->ip_summed != CHECKSUM_HW)
+               if (skb->ip_summed != CHECKSUM_PARTIAL)
                        skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
 
                /* Update sequence range on original skb. */
@@ -2070,7 +2089,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        th->window = htons(req->rcv_wnd);
 
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
-       tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
+       tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
                              ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
                              TCP_SKB_CB(skb)->when,
                              req->ts_recent);
@@ -2157,10 +2176,9 @@ int tcp_connect(struct sock *sk)
        skb_shinfo(buff)->gso_size = 0;
        skb_shinfo(buff)->gso_type = 0;
        buff->csum = 0;
+       tp->snd_nxt = tp->write_seq;
        TCP_SKB_CB(buff)->seq = tp->write_seq++;
        TCP_SKB_CB(buff)->end_seq = tp->write_seq;
-       tp->snd_nxt = tp->write_seq;
-       tp->pushed_seq = tp->write_seq;
 
        /* Send it off. */
        TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2188,12 @@ int tcp_connect(struct sock *sk)
        sk_charge_skb(sk, buff);
        tp->packets_out += tcp_skb_pcount(buff);
        tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+
+       /* We change tp->snd_nxt after the tcp_transmit_skb() call
+        * in order to make this packet get counted in tcpOutSegs.
+        */
+       tp->snd_nxt = tp->write_seq;
+       tp->pushed_seq = tp->write_seq;
        TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
        /* Timer for repeating the SYN until an answer. */
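
The reordering in tcp_connect() exists for the tcpOutSegs test added earlier: while tcp_transmit_skb() runs, snd_nxt still equals the SYN's starting sequence number, so after(end_seq, snd_nxt) holds and the SYN is counted; snd_nxt and pushed_seq are advanced only afterwards, as the new comment says. A sketch of the predicate under both orderings, with an arbitrary example ISN:

    #include <stdio.h>
    #include <stdint.h>

    static int seq_after(uint32_t a, uint32_t b)    /* the kernel's after() */
    {
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        /* State during the SYN's tcp_transmit_skb() call after this
         * reordering; isn is an arbitrary example initial sequence number.
         */
        uint32_t isn = 0x12345678u;
        uint32_t seq = isn, end_seq = isn + 1;

        /* snd_nxt still equals isn, so the SYN is counted: prints 1. */
        printf("%d\n", seq_after(end_seq, isn) || seq == end_seq);

        /* With the old ordering snd_nxt was already isn + 1: prints 0,
         * and the SYN would have been missed by tcpOutSegs.
         */
        printf("%d\n", seq_after(end_seq, isn + 1) || seq == end_seq);
        return 0;
    }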