[TCP]: whitespace cleanup
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c26076f..fb02560 100644
@@ -50,9 +50,9 @@
  *             Andi Kleen:             Make sure we never ack data there is not
  *                                     enough room for. Also make this condition
  *                                     a fatal error if it might still happen.
- *             Andi Kleen:             Add tcp_measure_rcv_mss to make 
+ *             Andi Kleen:             Add tcp_measure_rcv_mss to make
  *                                     connections with MSS<min(MTU,ann. MSS)
- *                                     work without delayed acks. 
+ *                                     work without delayed acks.
  *             Andi Kleen:             Process packets with PSH set in the
  *                                     fast path.
  *             J Hadi Salim:           ECN support
@@ -86,6 +86,7 @@ int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly;
+int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
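
The new sysctl_tcp_frto_response chooses how cwnd is repaired once an RTO has been judged spurious; the switch added to tcp_process_frto() further down maps value 2 to the undo response, 1 to the conservative response and anything else to rate halving. The sketch below is a rough userspace model of that mapping, not kernel code; the struct, the sample numbers and the simplified cwnd arithmetic are illustrative only (the real helpers tcp_undo_cwr() and tcp_enter_cwr() do more bookkeeping).

#include <stdio.h>

struct frto_model {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
	unsigned int prior_cwnd;	/* cwnd in use before the spurious RTO */
};

/* response: the value of sysctl_tcp_frto_response (0, 1 or 2) */
static void spurious_rto_response(struct frto_model *tp, int response)
{
	switch (response) {
	case 2:		/* undo: go back to the pre-RTO cwnd */
		tp->snd_cwnd = tp->prior_cwnd;
		break;
	case 1:		/* conservative: clamp to ssthresh, stay in cong. avoidance */
		if (tp->snd_cwnd > tp->snd_ssthresh)
			tp->snd_cwnd = tp->snd_ssthresh;
		break;
	default:	/* 0: rate halving via CWR, shown here as a plain halving */
		tp->snd_cwnd = (tp->snd_cwnd + 1) / 2;
		break;
	}
}

int main(void)
{
	struct frto_model tp = { .snd_cwnd = 20, .snd_ssthresh = 10, .prior_cwnd = 20 };

	spurious_rto_response(&tp, 1);
	printf("conservative response: cwnd=%u\n", tp.snd_cwnd);	/* 10 */
	return 0;
}
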
@@ -100,6 +101,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
 #define FLAG_DATA_LOST         0x80 /* SACK detected data lossage.             */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
+#define FLAG_ONLY_ORIG_SACKED  0x200 /* SACKs only non-rexmit sent before RTO */
 
 #define FLAG_ACKED             (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP           (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -110,19 +112,21 @@ int sysctl_tcp_abc __read_mostly;
 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
 #define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
 
+#define IsSackFrto() (sysctl_tcp_frto == 0x2)
+
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 
-/* Adapt the MSS value used to make delayed ack decision to the 
+/* Adapt the MSS value used to make delayed ack decision to the
  * real world.
- */ 
+ */
 static void tcp_measure_rcv_mss(struct sock *sk,
                                const struct sk_buff *skb)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       const unsigned int lss = icsk->icsk_ack.last_seg_size; 
+       const unsigned int lss = icsk->icsk_ack.last_seg_size;
        unsigned int len;
 
-       icsk->icsk_ack.last_seg_size = 0; 
+       icsk->icsk_ack.last_seg_size = 0;
 
        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
@@ -440,15 +444,15 @@ void tcp_rcv_space_adjust(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int time;
        int space;
-       
+
        if (tp->rcvq_space.time == 0)
                goto new_measure;
-       
+
        time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) ||
            tp->rcv_rtt_est.rtt == 0)
                return;
-       
+
        space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
 
        space = max(tp->rcvq_space.space, space);
@@ -483,7 +487,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                        }
                }
        }
-       
+
 new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
        tp->rcvq_space.time = tcp_time_stamp;
@@ -509,7 +513,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
        tcp_measure_rcv_mss(sk, skb);
 
        tcp_rcv_rtt_measure(tp);
-       
+
        now = tcp_time_stamp;
 
        if (!icsk->icsk_ack.ato) {
@@ -561,7 +565,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
        /*      The following amusing code comes from Jacobson's
         *      article in SIGCOMM '88.  Note that rtt and mdev
         *      are scaled versions of rtt and mean deviation.
-        *      This is designed to be as fast as possible 
+        *      This is designed to be as fast as possible
         *      m stands for "measurement".
         *
         *      On a 1990 paper the rto value is changed to:
@@ -574,7 +578,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
         * does not matter how to _calculate_ it. Seems, it was trap
         * that VJ failed to avoid. 8)
         */
-       if(m == 0)
+       if (m == 0)
                m = 1;
        if (tp->srtt != 0) {
                m -= (tp->srtt >> 3);   /* m is now error in rtt est */
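
The context above carries Jacobson's SIGCOMM '88 estimator with srtt kept scaled by 8 and mdev by 4, so the 1/8 and 1/4 gains become shifts. Below is a minimal userspace sketch of that arithmetic under those assumptions; it mirrors the update visible in this hunk but omits the mdev_max/rttvar tracking the kernel layers on top, and the sample values in main() are made up.

#include <stdio.h>

struct rtt_est {
	long srtt;	/* smoothed RTT, scaled by 8 */
	long mdev;	/* mean deviation, scaled by 4 */
};

static void rtt_sample(struct rtt_est *e, long m)	/* m: measured RTT */
{
	if (m == 0)
		m = 1;
	if (e->srtt != 0) {
		long err = m - (e->srtt >> 3);	/* error against smoothed RTT */
		e->srtt += err;			/* srtt := srtt + err/8, in scaled form */
		if (err < 0)
			err = -err;
		e->mdev += err - (e->mdev >> 2);	/* mdev := mdev + (|err| - mdev)/4 */
	} else {
		e->srtt = m << 3;		/* first measurement */
		e->mdev = m << 1;		/* start with rto ~= 3 * rtt */
	}
}

static long rto(const struct rtt_est *e)
{
	return (e->srtt >> 3) + e->mdev;	/* SRTT + 4 * mean deviation */
}

int main(void)
{
	struct rtt_est e = { 0, 0 };
	long samples[] = { 100, 110, 90, 300, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&e, samples[i]);
		printf("sample=%ld srtt=%ld rto=%ld\n", samples[i], e.srtt >> 3, rto(&e));
	}
	return 0;
}
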
@@ -759,15 +763,17 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 }
 
 /* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk)
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
 
        tp->prior_ssthresh = 0;
        tp->bytes_acked = 0;
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+       if (icsk->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+               if (set_ssthresh)
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tp->snd_cwnd = min(tp->snd_cwnd,
                                   tcp_packets_in_flight(tp) + 1U);
                tp->snd_cwnd_cnt = 0;
@@ -936,28 +942,58 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
        struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+       struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
        int reord = tp->packets_out;
        int prior_fackets;
        u32 lost_retrans = 0;
        int flag = 0;
        int dup_sack = 0;
+       int cached_fack_count;
        int i;
+       int first_sack_index;
 
        if (!tp->sacked_out)
                tp->fackets_out = 0;
        prior_fackets = tp->fackets_out;
 
+       /* Check for D-SACK. */
+       if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+               dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+       } else if (num_sacks > 1 &&
+                       !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
+                       !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+               dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+       }
+
+       /* D-SACK for already forgotten data...
+        * Do dumb counting. */
+       if (dup_sack &&
+                       !after(ntohl(sp[0].end_seq), prior_snd_una) &&
+                       after(ntohl(sp[0].end_seq), tp->undo_marker))
+               tp->undo_retrans--;
+
+       /* Eliminate too old ACKs, but take into
+        * account more or less fresh ones, they can
+        * contain valid SACK info.
+        */
+       if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+               return 0;
+
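
The block just added hoists the RFC 2883 D-SACK test out of the per-block loop: the receiver is reporting duplicate data when the first SACK block lies below the cumulative ACK, or when it is fully contained in the second block. A self-contained model of that check, with the usual wraparound-safe sequence comparisons, might look like the following (names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* wraparound-safe 32-bit sequence comparisons */
static int before(uint32_t seq1, uint32_t seq2) { return (int32_t)(seq1 - seq2) < 0; }
static int after(uint32_t seq1, uint32_t seq2)  { return before(seq2, seq1); }

struct sack_block { uint32_t start_seq, end_seq; };

/* returns 1 when the first SACK block is a D-SACK (RFC 2883) */
static int is_dsack(const struct sack_block *sp, int num_sacks, uint32_t ack_seq)
{
	if (before(sp[0].start_seq, ack_seq))
		return 1;			/* below the cumulative ACK */
	if (num_sacks > 1 &&
	    !after(sp[0].end_seq, sp[1].end_seq) &&
	    !before(sp[0].start_seq, sp[1].start_seq))
		return 1;			/* contained in the second block */
	return 0;
}

int main(void)
{
	struct sack_block sp[2] = { { 1000, 1500 }, { 500, 2000 } };

	printf("%d\n", is_dsack(sp, 2, 800));	/* 1: first block inside the second */
	printf("%d\n", is_dsack(sp, 1, 2000));	/* 1: first block below the ACK */
	return 0;
}
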
        /* SACK fastpath:
         * if the only SACK change is the increase of the end_seq of
         * the first block then only apply that SACK block
         * and use retrans queue hinting otherwise slowpath */
        flag = 1;
-       for (i = 0; i< num_sacks; i++) {
-               __u32 start_seq = ntohl(sp[i].start_seq);
-               __u32 end_seq =  ntohl(sp[i].end_seq);
+       for (i = 0; i < num_sacks; i++) {
+               __be32 start_seq = sp[i].start_seq;
+               __be32 end_seq = sp[i].end_seq;
 
-               if (i == 0){
+               if (i == 0) {
                        if (tp->recv_sack_cache[i].start_seq != start_seq)
                                flag = 0;
                } else {
@@ -967,39 +1003,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                }
                tp->recv_sack_cache[i].start_seq = start_seq;
                tp->recv_sack_cache[i].end_seq = end_seq;
-
-               /* Check for D-SACK. */
-               if (i == 0) {
-                       u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
-
-                       if (before(start_seq, ack)) {
-                               dup_sack = 1;
-                               tp->rx_opt.sack_ok |= 4;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
-                       } else if (num_sacks > 1 &&
-                                  !after(end_seq, ntohl(sp[1].end_seq)) &&
-                                  !before(start_seq, ntohl(sp[1].start_seq))) {
-                               dup_sack = 1;
-                               tp->rx_opt.sack_ok |= 4;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
-                       }
-
-                       /* D-SACK for already forgotten data...
-                        * Do dumb counting. */
-                       if (dup_sack &&
-                           !after(end_seq, prior_snd_una) &&
-                           after(end_seq, tp->undo_marker))
-                               tp->undo_retrans--;
-
-                       /* Eliminate too old ACKs, but take into
-                        * account more or less fresh ones, they can
-                        * contain valid SACK info.
-                        */
-                       if (before(ack, prior_snd_una - tp->max_window))
-                               return 0;
-               }
+       }
+       /* Clear the rest of the cached SACK blocks so they won't match mistakenly. */
+       for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
+               tp->recv_sack_cache[i].start_seq = 0;
+               tp->recv_sack_cache[i].end_seq = 0;
        }
 
+       first_sack_index = 0;
        if (flag)
                num_sacks = 1;
        else {
@@ -1016,6 +1027,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                        tmp = sp[j];
                                        sp[j] = sp[j+1];
                                        sp[j+1] = tmp;
+
+                                       /* Track where the first SACK block goes to */
+                                       if (j == first_sack_index)
+                                               first_sack_index = j+1;
                                }
 
                        }
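
Per the fastpath comment above, the SACK walk may reuse the queue hints only when the new blocks are essentially the ones seen on the previous ACK, with at most the first block's end_seq having grown; the code compares against recv_sack_cache and wipes the unused slots so stale blocks cannot match later. The following is a rough userspace model of that test under those assumptions (structure and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SACKS 4

struct sack_block { uint32_t start_seq, end_seq; };

static int sack_fastpath(struct sack_block *cache,
			 const struct sack_block *sp, int num_sacks)
{
	int i, fast = 1;

	for (i = 0; i < num_sacks; i++) {
		if (i == 0) {
			/* block 0: only start_seq must match, end_seq may grow */
			if (cache[i].start_seq != sp[i].start_seq)
				fast = 0;
		} else {
			/* any other block must match exactly */
			if (cache[i].start_seq != sp[i].start_seq ||
			    cache[i].end_seq != sp[i].end_seq)
				fast = 0;
		}
		cache[i] = sp[i];	/* remember for the next ACK */
	}
	/* clear unused cache slots so stale blocks cannot match later */
	for (; i < MAX_SACKS; i++)
		cache[i].start_seq = cache[i].end_seq = 0;

	return fast;
}

int main(void)
{
	struct sack_block cache[MAX_SACKS];
	struct sack_block ack1[] = { { 1000, 1500 } };
	struct sack_block ack2[] = { { 1000, 2000 } };	/* same block, end grew */

	memset(cache, 0, sizeof(cache));
	sack_fastpath(cache, ack1, 1);
	printf("fastpath=%d\n", sack_fastpath(cache, ack2, 1));	/* 1 */
	return 0;
}
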
@@ -1025,31 +1040,40 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        /* clear flag as used for different purpose in following code */
        flag = 0;
 
+       /* Use SACK fastpath hint if valid */
+       cached_skb = tp->fastpath_skb_hint;
+       cached_fack_count = tp->fastpath_cnt_hint;
+       if (!cached_skb) {
+               cached_skb = tcp_write_queue_head(sk);
+               cached_fack_count = 0;
+       }
+
        for (i=0; i<num_sacks; i++, sp++) {
                struct sk_buff *skb;
                __u32 start_seq = ntohl(sp->start_seq);
                __u32 end_seq = ntohl(sp->end_seq);
                int fack_count;
 
-               /* Use SACK fastpath hint if valid */
-               if (tp->fastpath_skb_hint) {
-                       skb = tp->fastpath_skb_hint;
-                       fack_count = tp->fastpath_cnt_hint;
-               } else {
-                       skb = sk->sk_write_queue.next;
-                       fack_count = 0;
-               }
+               skb = cached_skb;
+               fack_count = cached_fack_count;
 
                /* Event "B" in the comment above. */
                if (after(end_seq, tp->high_seq))
                        flag |= FLAG_DATA_LOST;
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
                        int in_sack, pcount;
                        u8 sacked;
 
-                       tp->fastpath_skb_hint = skb;
-                       tp->fastpath_cnt_hint = fack_count;
+                       if (skb == tcp_send_head(sk))
+                               break;
+
+                       cached_skb = skb;
+                       cached_fack_count = fack_count;
+                       if (i == first_sack_index) {
+                               tp->fastpath_skb_hint = skb;
+                               tp->fastpath_cnt_hint = fack_count;
+                       }
 
                        /* The retransmission queue is always in order, so
                         * we can short-circuit the walk early.
@@ -1144,6 +1168,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                                /* clear lost hint */
                                                tp->retransmit_skb_hint = NULL;
                                        }
+                                       /* SACK enhanced F-RTO detection.
+                                        * Set flag if and only if non-rexmitted
+                                        * segments below frto_highmark are
+                                        * SACKed (RFC4138; Appendix B).
+                                        * Clearing is correct because the walk is in order
+                                        */
+                                       if (after(end_seq, tp->frto_highmark)) {
+                                               flag &= ~FLAG_ONLY_ORIG_SACKED;
+                                       } else {
+                                               if (!(sacked & TCPCB_RETRANS))
+                                                       flag |= FLAG_ONLY_ORIG_SACKED;
+                                       }
                                }
 
                                TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
@@ -1180,7 +1216,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
                struct sk_buff *skb;
 
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
                                break;
                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
@@ -1209,7 +1247,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
        tp->left_out = tp->sacked_out + tp->lost_out;
 
-       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
+       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
+           (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
                tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
 
 #if FASTRETRANS_DEBUG > 0
@@ -1221,9 +1260,54 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        return flag;
 }
 
-/* RTO occurred, but do not yet enter loss state. Instead, transmit two new
- * segments to see from the next ACKs whether any data was really missing.
- * If the RTO was spurious, new ACKs should arrive.
+/* F-RTO can only be used if these conditions are satisfied:
+ *  - there must be some unsent new data
+ *  - the advertised window should allow sending it
+ *  - TCP has never retransmitted anything other than the head (the SACK
+ *    enhanced variant from Appendix B of RFC4138 is more robust here)
+ */
+int tcp_use_frto(struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (!sysctl_tcp_frto || !tcp_send_head(sk) ||
+               after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
+                     tp->snd_una + tp->snd_wnd))
+               return 0;
+
+       if (IsSackFrto())
+               return 1;
+
+       /* Avoid expensive walking of rexmit queue if possible */
+       if (tp->retrans_out > 1)
+               return 0;
+
+       skb = tcp_write_queue_head(sk);
+       skb = tcp_write_queue_next(sk, skb);    /* Skips head */
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+                       return 0;
+               /* Short-circuit when first non-SACKed skb has been checked */
+               if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
+                       break;
+       }
+       return 1;
+}
+
+/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
+ * recovery a bit and use heuristics in tcp_process_frto() to detect if
+ * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
+ * keep retrans_out counting accurate (with SACK F-RTO, segments other than
+ * the head may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
+ * bits are handled if the Loss state is really to be entered (in
+ * tcp_enter_frto_loss).
+ *
+ * Do what tcp_enter_loss() would do; when the RTO expires for the second
+ * time it does:
+ *  "Reduce ssthresh if it has not yet been made inside this window."
  */
 void tcp_enter_frto(struct sock *sk)
 {
@@ -1231,39 +1315,69 @@ void tcp_enter_frto(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
-       tp->frto_counter = 1;
-
-       if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
-            tp->snd_una == tp->high_seq ||
-            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+       if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
+           tp->snd_una == tp->high_seq ||
+           ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
+            !icsk->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               /* Our state is too optimistic in the ssthresh() call because
+                * cwnd is not reduced until tcp_enter_frto_loss() when the
+                * previous F-RTO recovery has not yet completed. The pattern
+                * would be: RTO, Cumulative ACK, RTO (two RTOs for the same
+                * segment do not end up here twice).
+                * RFC4138 should be more specific on what to do, even though
+                * an RTO is quite unlikely to occur after the first Cumulative
+                * ACK due to back-off and the complexity of triggering events ...
+                */
+               if (tp->frto_counter) {
+                       u32 stored_cwnd;
+                       stored_cwnd = tp->snd_cwnd;
+                       tp->snd_cwnd = 2;
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+                       tp->snd_cwnd = stored_cwnd;
+               } else {
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               }
+               /* ... in theory, the congestion control module could do "any
+                * tricks" in ssthresh(), which means that ca_state, the lost
+                * bits and the lost_out counter would have to be faked before
+                * the call occurs. We consider that too expensive, unlikely and
+                * hacky, so modules using these in ssthresh() must deal with
+                * these incompatibility issues if they receive CA_EVENT_FRTO
+                * while frto_counter != 0.
+                */
                tcp_ca_event(sk, CA_EVENT_FRTO);
        }
 
-       /* Have to clear retransmission markers here to keep the bookkeeping
-        * in shape, even though we are not yet in Loss state.
-        * If something was really lost, it is eventually caught up
-        * in tcp_enter_frto_loss.
-        */
-       tp->retrans_out = 0;
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
-               TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
+       skb = tcp_write_queue_head(sk);
+       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+               tp->retrans_out -= tcp_skb_pcount(skb);
        }
        tcp_sync_left_out(tp);
 
-       tcp_set_ca_state(sk, TCP_CA_Open);
-       tp->frto_highmark = tp->snd_nxt;
+       /* Earlier loss recovery underway (see RFC4138; Appendix B).
+        * The last condition is necessary at least in tp->frto_counter case.
+        */
+       if (IsSackFrto() && (tp->frto_counter ||
+           ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
+           after(tp->high_seq, tp->snd_una)) {
+               tp->frto_highmark = tp->high_seq;
+       } else {
+               tp->frto_highmark = tp->snd_nxt;
+       }
+       tcp_set_ca_state(sk, TCP_CA_Disorder);
+       tp->high_seq = tp->snd_nxt;
+       tp->frto_counter = 1;
 }
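
The stored_cwnd dance above exists because ssthresh() is consulted while cwnd may still be inflated from an F-RTO round that never finished. A tiny standalone illustration, assuming a Reno-style ssthresh of max(cwnd / 2, 2), shows how pinning cwnd to 2 around the call keeps the new ssthresh at its floor; the numbers are made up.

#include <stdio.h>

static unsigned int reno_ssthresh(unsigned int cwnd)
{
	return cwnd / 2 > 2 ? cwnd / 2 : 2;	/* max(cwnd >> 1, 2) */
}

int main(void)
{
	unsigned int cwnd = 40;		/* not yet reduced by the previous round */
	unsigned int stored_cwnd = cwnd;
	unsigned int ssthresh;

	cwnd = 2;			/* pretend the window already collapsed */
	ssthresh = reno_ssthresh(cwnd);	/* 2, the floor value */
	cwnd = stored_cwnd;		/* restore; tcp_enter_frto_loss reduces it later */

	printf("ssthresh=%u (instead of %u)\n", ssthresh, reno_ssthresh(stored_cwnd));
	return 0;
}
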
 
 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
  * which indicates that we should follow the traditional RTO recovery,
  * i.e. mark everything lost and do go-back-N retransmission.
  */
-static void tcp_enter_frto_loss(struct sock *sk)
+static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1272,10 +1386,23 @@ static void tcp_enter_frto_loss(struct sock *sk)
        tp->sacked_out = 0;
        tp->lost_out = 0;
        tp->fackets_out = 0;
+       tp->retrans_out = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
-               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+               /*
+                * Count the retransmission made on RTO correctly (only when
+                * we were waiting for the first ACK and did not get it)...
+                */
+               if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
+                       tp->retrans_out += tcp_skb_pcount(skb);
+                       /* ...enter this if branch just for the first segment */
+                       flag |= FLAG_DATA_ACKED;
+               } else {
+                       TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+               }
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
 
                        /* Do not mark those segments lost that were
@@ -1293,7 +1420,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
        }
        tcp_sync_left_out(tp);
 
-       tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1;
+       tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
        tp->snd_cwnd_cnt = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->undo_marker = 0;
@@ -1351,7 +1478,9 @@ void tcp_enter_loss(struct sock *sk, int how)
        if (!how)
                tp->undo_marker = tp->snd_una;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
                        tp->undo_marker = 0;
@@ -1386,14 +1515,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
         * receiver _host_ is heavily congested (or buggy).
         * Do processing similar to RTO timeout.
         */
-       if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
+       if ((skb = tcp_write_queue_head(sk)) != NULL &&
            (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                struct inet_connection_sock *icsk = inet_csk(sk);
                NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
-               tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
+               tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          icsk->icsk_rto, TCP_RTO_MAX);
                return 1;
@@ -1414,7 +1543,7 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
 {
        return tp->packets_out &&
-              tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
+              tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1514,6 +1643,10 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
 {
        __u32 packets_out;
 
+       /* Do not perform any recovery during FRTO algorithm */
+       if (tp->frto_counter)
+               return 0;
+
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
                return 1;
@@ -1605,11 +1738,13 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
        } else {
-               skb = sk->sk_write_queue.next;
+               skb = tcp_write_queue_head(sk);
                cnt = 0;
        }
 
-       sk_stream_for_retrans_queue_from(skb, sk) {
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                /* TODO: do this better */
                /* this is not the most efficient way to do this... */
                tp->lost_skb_hint = skb;
@@ -1623,12 +1758,11 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
                        /* clear xmit_retransmit_queue hints
                         *  if this is beyond hint */
-                       if(tp->retransmit_skb_hint != NULL &&
-                          before(TCP_SKB_CB(skb)->seq,
-                                 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
-
+                       if (tp->retransmit_skb_hint != NULL &&
+                           before(TCP_SKB_CB(skb)->seq,
+                                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                                tp->retransmit_skb_hint = NULL;
-                       }
+
                }
        }
        tcp_sync_left_out(tp);
@@ -1656,9 +1790,11 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
                struct sk_buff *skb;
 
                skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : sk->sk_write_queue.next;
+                       : tcp_write_queue_head(sk);
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (!tcp_skb_timedout(sk, skb))
                                break;
 
@@ -1849,7 +1985,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 {
        if (tcp_may_undo(tp)) {
                struct sk_buff *skb;
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                }
 
@@ -1885,7 +2023,7 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
                tp->retrans_stamp = 0;
 
        if (flag&FLAG_ECE)
-               tcp_enter_cwr(sk);
+               tcp_enter_cwr(sk, 1);
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                int state = TCP_CA_Open;
@@ -1954,11 +2092,11 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
         * 1. Reno does not count dupacks (sacked_out) automatically. */
        if (!tp->packets_out)
                tp->sacked_out = 0;
-        /* 2. SACK counts snd_fack in packets inaccurately. */
+       /* 2. SACK counts snd_fack in packets inaccurately. */
        if (tp->sacked_out == 0)
                tp->fackets_out = 0;
 
-        /* Now state machine starts.
+       /* Now state machine starts.
         * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
        if (flag&FLAG_ECE)
                tp->prior_ssthresh = 0;
@@ -1982,8 +2120,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
        /* E. Check state exit conditions. State can be terminated
         *    when high_seq is ACKed. */
        if (icsk->icsk_ca_state == TCP_CA_Open) {
-               if (!sysctl_tcp_frto)
-                       BUG_TRAP(tp->retrans_out == 0);
+               BUG_TRAP(tp->retrans_out == 0);
                tp->retrans_stamp = 0;
        } else if (!before(tp->snd_una, tp->high_seq)) {
                switch (icsk->icsk_ca_state) {
@@ -2188,7 +2325,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
                         __u32 now, __s32 *seq_rtt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 
+       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
        __u32 seq = tp->snd_una;
        __u32 packets_acked;
        int acked = 0;
@@ -2262,9 +2399,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                = icsk->icsk_ca_ops->rtt_sample;
        struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
 
-       while ((skb = skb_peek(&sk->sk_write_queue)) &&
-              skb != sk->sk_send_head) {
-               struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 
+       while ((skb = tcp_write_queue_head(sk)) &&
+              skb != tcp_send_head(sk)) {
+               struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                __u8 sacked = scb->sacked;
 
                /* If our packet is before the ack sequence we can
@@ -2303,7 +2440,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
                if (sacked) {
                        if (sacked & TCPCB_RETRANS) {
-                               if(sacked & TCPCB_SACKED_RETRANS)
+                               if (sacked & TCPCB_SACKED_RETRANS)
                                        tp->retrans_out -= tcp_skb_pcount(skb);
                                acked |= FLAG_RETRANS_DATA_ACKED;
                                seq_rtt = -1;
@@ -2326,7 +2463,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                }
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
-               __skb_unlink(skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(skb, sk);
                sk_stream_free_skb(sk, skb);
                clear_all_retrans_hints(tp);
        }
@@ -2375,7 +2512,7 @@ static void tcp_ack_probe(struct sock *sk)
 
        /* Was it a usable window open? */
 
-       if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+       if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
                   tp->snd_una + tp->snd_wnd)) {
                icsk->icsk_backoff = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
@@ -2452,39 +2589,128 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
        return flag;
 }
 
-static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
+/* A very conservative spurious RTO response algorithm: reduce cwnd and
+ * continue in congestion avoidance.
+ */
+static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
+{
+       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+       tp->snd_cwnd_cnt = 0;
+       tcp_moderate_cwnd(tp);
+}
+
+/* A conservative spurious RTO response algorithm: reduce cwnd using
+ * rate halving and continue in congestion avoidance.
+ */
+static void tcp_ratehalving_spur_to_response(struct sock *sk)
+{
+       tcp_enter_cwr(sk, 0);
+}
+
+static void tcp_undo_spur_to_response(struct sock *sk, int flag)
+{
+       if (flag&FLAG_ECE)
+               tcp_ratehalving_spur_to_response(sk);
+       else
+               tcp_undo_cwr(sk, 1);
+}
+
+/* F-RTO spurious RTO detection algorithm (RFC4138)
+ *
+ * F-RTO spans the two new ACKs following the RTO (well, almost; see inline
+ * comments). State (the ACK number) is kept in frto_counter. When an ACK
+ * advances the window (but not to or beyond the highest sequence sent before RTO):
+ *   On First ACK,  send two new segments out.
+ *   On Second ACK, RTO was likely spurious. Do spurious response (response
+ *                  algorithm is not part of the F-RTO detection algorithm
+ *                  given in RFC4138 but can be selected separately).
+ * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
+ * and TCP falls back to conventional RTO recovery.
+ *
+ * Rationale: if the RTO was spurious, new ACKs should arrive from the
+ * original window even after we transmit two new data segments.
+ *
+ * SACK version:
+ *   in the first step, wait until the first cumulative ACK arrives, then
+ *   move to the second step. In the second step, the next ACK decides.
+ *
+ * F-RTO is implemented (mainly) in four functions:
+ *   - tcp_use_frto() determines whether TCP can use F-RTO
+ *   - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used; it is
+ *     called when tcp_use_frto() has given the green light
+ *   - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
+ *   - tcp_enter_frto_loss() is called if there is not enough evidence
+ *     to prove that the RTO is indeed spurious. It transfers the control
+ *     from F-RTO to the conventional RTO recovery
+ */
+static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       
+
        tcp_sync_left_out(tp);
-       
-       if (tp->snd_una == prior_snd_una ||
-           !before(tp->snd_una, tp->frto_highmark)) {
-               /* RTO was caused by loss, start retransmitting in
-                * go-back-N slow start
-                */
-               tcp_enter_frto_loss(sk);
-               return;
+
+       /* Duplicate the behavior from Loss state (fastretrans_alert) */
+       if (flag&FLAG_DATA_ACKED)
+               inet_csk(sk)->icsk_retransmits = 0;
+
+       if (!before(tp->snd_una, tp->frto_highmark)) {
+               tcp_enter_frto_loss(sk, tp->frto_counter + 1, flag);
+               return 1;
        }
 
-       if (tp->frto_counter == 1) {
-               /* First ACK after RTO advances the window: allow two new
-                * segments out.
+       if (!IsSackFrto() || IsReno(tp)) {
+               /* RFC4138 shortcoming in step 2; it should also have case c):
+                * the ACK is neither a duplicate nor advances the window,
+                * e.g., opposite-direction data or a window update
                 */
-               tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
+               if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
+                   !(flag&FLAG_FORWARD_PROGRESS))
+                       return 1;
+
+               if (!(flag&FLAG_DATA_ACKED)) {
+                       tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
+                                           flag);
+                       return 1;
+               }
        } else {
-               /* Also the second ACK after RTO advances the window.
-                * The RTO was likely spurious. Reduce cwnd and continue
-                * in congestion avoidance
-                */
-               tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-               tcp_moderate_cwnd(tp);
+               if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       /* Prevent sending of new data. */
+                       tp->snd_cwnd = min(tp->snd_cwnd,
+                                          tcp_packets_in_flight(tp));
+                       return 1;
+               }
+
+               if ((tp->frto_counter == 2) &&
+                   (!(flag&FLAG_FORWARD_PROGRESS) ||
+                    ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
+                       /* RFC4138 shortcoming (see comment above) */
+                       if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
+                               return 1;
+
+                       tcp_enter_frto_loss(sk, 3, flag);
+                       return 1;
+               }
        }
 
-       /* F-RTO affects on two new ACKs following RTO.
-        * At latest on third ACK the TCP behavior is back to normal.
-        */
-       tp->frto_counter = (tp->frto_counter + 1) % 3;
+       if (tp->frto_counter == 1) {
+               tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
+               tp->frto_counter = 2;
+               return 1;
+       } else /* frto_counter == 2 */ {
+               switch (sysctl_tcp_frto_response) {
+               case 2:
+                       tcp_undo_spur_to_response(sk, flag);
+                       break;
+               case 1:
+                       tcp_conservative_spur_to_response(tp);
+                       break;
+               default:
+                       tcp_ratehalving_spur_to_response(sk);
+                       break;
+               };
+               tp->frto_counter = 0;
+       }
+       return 0;
 }
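
Stripped of kernel bookkeeping, the detection half of the algorithm described in the comment above reduces to a two-step state machine on frto_counter. The sketch below models only the basic (non-SACK) variant: a duplicate ACK at either step falls back to conventional loss recovery, the first new ACK sends two new segments, and the second declares the RTO spurious so one of the tcp_frto_response handlers runs. It is a model of the RFC 4138 logic, not of tcp_process_frto() itself.

#include <stdio.h>

enum frto_action { FRTO_SEND_TWO_NEW, FRTO_SPURIOUS, FRTO_FALL_BACK_TO_LOSS };

struct frto_state {
	int counter;	/* 1 right after the RTO, 2 after the first new ACK */
};

static enum frto_action frto_ack(struct frto_state *st, int data_acked)
{
	if (!data_acked) {
		/* duplicate ACK: the retransmitted segment really was lost */
		st->counter = 0;
		return FRTO_FALL_BACK_TO_LOSS;
	}
	if (st->counter == 1) {
		st->counter = 2;	/* first new ACK: probe with two new segments */
		return FRTO_SEND_TWO_NEW;
	}
	st->counter = 0;		/* second new ACK: RTO was spurious */
	return FRTO_SPURIOUS;
}

int main(void)
{
	struct frto_state st = { .counter = 1 };	/* RTO just fired */

	printf("%d\n", frto_ack(&st, 1));	/* 0: send two new segments */
	printf("%d\n", frto_ack(&st, 1));	/* 1: spurious, run the response */
	return 0;
}
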
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -2498,6 +2724,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        u32 prior_in_flight;
        s32 seq_rtt;
        int prior_packets;
+       int frto_cwnd = 0;
 
        /* If the ack is newer than sent or older than previous acks
         * then we can probably ignore it.
@@ -2560,15 +2787,16 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
        if (tp->frto_counter)
-               tcp_process_frto(sk, prior_snd_una);
+               frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
-               if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
+               if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
+                   tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0);
                tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
        } else {
-               if ((flag & FLAG_DATA_ACKED))
+               if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                        tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
        }
 
@@ -2584,7 +2812,7 @@ no_queue:
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
         */
-       if (sk->sk_send_head)
+       if (tcp_send_head(sk))
                tcp_ack_probe(sk);
        return 1;
 
@@ -2611,8 +2839,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
        ptr = (unsigned char *)(th + 1);
        opt_rx->saw_tstamp = 0;
 
-       while(length>0) {
-               int opcode=*ptr++;
+       while (length > 0) {
+               int opcode=*ptr++;
                int opsize;
 
                switch (opcode) {
@@ -2627,9 +2855,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        return;
                                if (opsize > length)
                                        return; /* don't parse partial options */
-                               switch(opcode) {
+                               switch (opcode) {
                                case TCPOPT_MSS:
-                                       if(opsize==TCPOLEN_MSS && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_MSS && th->syn && !estab) {
                                                u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
                                                if (in_mss) {
                                                        if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
@@ -2639,12 +2867,12 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_WINDOW:
-                                       if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
+                                       if (opsize==TCPOLEN_WINDOW && th->syn && !estab)
                                                if (sysctl_tcp_window_scaling) {
                                                        __u8 snd_wscale = *(__u8 *) ptr;
                                                        opt_rx->wscale_ok = 1;
                                                        if (snd_wscale > 14) {
-                                                               if(net_ratelimit())
+                                                               if (net_ratelimit())
                                                                        printk(KERN_INFO "tcp_parse_options: Illegal window "
                                                                               "scaling value %d >14 received.\n",
                                                                               snd_wscale);
@@ -2654,7 +2882,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                                }
                                        break;
                                case TCPOPT_TIMESTAMP:
-                                       if(opsize==TCPOLEN_TIMESTAMP) {
+                                       if (opsize==TCPOLEN_TIMESTAMP) {
                                                if ((estab && opt_rx->tstamp_ok) ||
                                                    (!estab && sysctl_tcp_timestamps)) {
                                                        opt_rx->saw_tstamp = 1;
@@ -2664,7 +2892,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_SACK_PERM:
-                                       if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
                                                if (sysctl_tcp_sack) {
                                                        opt_rx->sack_ok = 1;
                                                        tcp_sack_reset(opt_rx);
@@ -2673,7 +2901,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        break;
 
                                case TCPOPT_SACK:
-                                       if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+                                       if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
                                           !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
                                           opt_rx->sack_ok) {
                                                TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
@@ -2686,10 +2914,10 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                         */
                                        break;
 #endif
-                               };
-                               ptr+=opsize-2;
-                               length-=opsize;
-               };
+                               };
+                               ptr+=opsize-2;
+                               length-=opsize;
+               };
        }
 }
 
@@ -2722,7 +2950,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
        tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
 }
 
 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
@@ -2735,8 +2963,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
+                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
                        tcp_store_ts_recent(tp);
        }
 }
@@ -2788,7 +3016,7 @@ static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
+               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
                !tcp_disordered_ack(sk, skb));
 }
 
@@ -2994,7 +3222,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         */
                        tp->rx_opt.num_sacks--;
                        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-                       for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
+                       for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i+1];
                        continue;
                }
@@ -3047,7 +3275,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.num_sacks--;
                sp--;
        }
-       for(; this_sack > 0; this_sack--, sp--)
+       for (; this_sack > 0; this_sack--, sp--)
                *sp = *(sp-1);
 
 new_sack:
@@ -3073,7 +3301,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                return;
        }
 
-       for(this_sack = 0; this_sack < num_sacks; ) {
+       for (this_sack = 0; this_sack < num_sacks; ) {
                /* Check if the start of the sack is covered by RCV.NXT. */
                if (!before(tp->rcv_nxt, sp->start_seq)) {
                        int i;
@@ -3129,7 +3357,7 @@ static void tcp_ofo_queue(struct sock *sk)
                __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->h.th->fin)
+               if (skb->h.th->fin)
                        tcp_fin(skb, sk, skb->h.th);
        }
 }
@@ -3195,9 +3423,9 @@ queue_and_out:
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->len)
+               if (skb->len)
                        tcp_event_data_recv(sk, tp, skb);
-               if(th->fin)
+               if (th->fin)
                        tcp_fin(skb, sk, th);
 
                if (!skb_queue_empty(&tp->out_of_order_queue)) {
@@ -3248,7 +3476,7 @@ drop:
                           TCP_SKB_CB(skb)->end_seq);
 
                tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
-               
+
                /* If window is closed, drop tail of packet. But after
                 * remembering D-SACK for its head made in previous line.
                 */
@@ -3327,7 +3555,7 @@ drop:
                        }
                }
                __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
-               
+
                /* And clean segments covered by new one as whole. */
                while ((skb1 = skb->next) !=
                       (struct sk_buff*)&tp->out_of_order_queue &&
@@ -3492,7 +3720,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  */
 static int tcp_prune_queue(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk); 
+       struct tcp_sock *tp = tcp_sk(sk);
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
@@ -3602,7 +3830,7 @@ static void tcp_new_space(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_should_expand_sndbuf(sk, tp)) {
-               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                        MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
                    demanded = max_t(unsigned int, tp->snd_cwnd,
                                                   tp->reordering + 1);
@@ -3675,7 +3903,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
  *     For 1003.1g we should support a new option TCP_STDURG to permit
  *     either form (or just set the sysctl tcp_stdurg).
  */
- 
+
 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -3756,7 +3984,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
                u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
                          th->syn;
 
-               /* Is the urgent pointer pointing into this packet? */   
+               /* Is the urgent pointer pointing into this packet? */
                if (ptr < skb->len) {
                        u8 tmp;
                        if (skb_copy_bits(skb, ptr, &tmp, 1))
@@ -3820,7 +4048,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
        int copied_early = 0;
 
        if (tp->ucopy.wakeup)
-               return 0;
+               return 0;
 
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                tp->ucopy.dma_chan = get_softnet_dma();
@@ -3856,26 +4084,26 @@ out:
 #endif /* CONFIG_NET_DMA */
 
 /*
- *     TCP receive function for the ESTABLISHED state. 
+ *     TCP receive function for the ESTABLISHED state.
  *
- *     It is split into a fast path and a slow path. The fast path is 
+ *     It is split into a fast path and a slow path. The fast path is
  *     disabled when:
  *     - A zero window was announced from us - zero window probing
- *        is only handled properly in the slow path. 
+ *        is only handled properly in the slow path.
  *     - Out of order segments arrived.
  *     - Urgent data is expected.
  *     - There is no buffer space left
  *     - Unexpected TCP flags/window values/header lengths are received
- *       (detected by checking the TCP header against pred_flags) 
+ *       (detected by checking the TCP header against pred_flags)
  *     - Data is sent in both directions. Fast path only supports pure senders
  *       or pure receivers (this means either the sequence number or the ack
  *       value must stay constant)
  *     - Unexpected TCP option.
  *
- *     When these conditions are not satisfied it drops into a standard 
+ *     When these conditions are not satisfied it drops into a standard
  *     receive procedure patterned after RFC793 to handle all cases.
  *     The first three cases are guaranteed by proper pred_flags setting,
- *     the rest is checked inline. Fast processing is turned on in 
+ *     the rest is checked inline. Fast processing is turned on in
  *     tcp_data_queue when everything is OK.
  */
 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
@@ -3885,15 +4113,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
        /*
         *      Header prediction.
-        *      The code loosely follows the one in the famous 
+        *      The code loosely follows the one in the famous
         *      "30 instruction TCP receive" Van Jacobson mail.
-        *      
-        *      Van's trick is to deposit buffers into socket queue 
+        *
+        *      Van's trick is to deposit buffers into socket queue
         *      on a device interrupt, to call tcp_recv function
         *      on the receive process context and checksum and copy
         *      the buffer to user space. smart...
         *
-        *      Our current scheme is not silly either but we take the 
+        *      Our current scheme is not silly either but we take the
         *      extra cost of the net_bh soft interrupt processing...
         *      We do checksum and copy also but from device to kernel.
         */
@@ -3904,7 +4132,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         *      if header_prediction is to be made
         *      'S' will always be tp->tcp_header_len >> 2
         *      '?' will be 0 for the fast path, otherwise pred_flags is 0 to
-        *  turn it off (when there are holes in the receive 
+        *  turn it off (when there are holes in the receive
         *       space for instance)
         *      PSH flag is ignored.
         */
@@ -3928,7 +4156,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                goto slow_path;
 
                        tp->rx_opt.saw_tstamp = 1;
-                       ++ptr; 
+                       ++ptr;
                        tp->rx_opt.rcv_tsval = ntohl(*ptr);
                        ++ptr;
                        tp->rx_opt.rcv_tsecr = ntohl(*ptr);
@@ -3960,7 +4188,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                 * on entry.
                                 */
                                tcp_ack(sk, skb, 0);
-                               __kfree_skb(skb); 
+                               __kfree_skb(skb);
                                tcp_data_snd_check(sk, tp);
                                return 0;
                        } else { /* Header too small */
@@ -4094,7 +4322,7 @@ slow_path:
                goto discard;
        }
 
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4109,7 +4337,7 @@ slow_path:
        }
 
 step5:
-       if(th->ack)
+       if (th->ack)
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
        tcp_rcv_rtt_measure_ts(sk, skb);
@@ -4378,11 +4606,11 @@ reset_and_undo:
 
 /*
  *     This function implements the receiving procedure of RFC 793 for
- *     all states except ESTABLISHED and TIME_WAIT. 
+ *     all states except ESTABLISHED and TIME_WAIT.
  *     It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
  *     address independent.
  */
-       
+
 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                          struct tcphdr *th, unsigned len)
 {
@@ -4397,29 +4625,29 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                goto discard;
 
        case TCP_LISTEN:
-               if(th->ack)
+               if (th->ack)
                        return 1;
 
-               if(th->rst)
+               if (th->rst)
                        goto discard;
 
-               if(th->syn) {
+               if (th->syn) {
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
-                       /* Now we have several options: In theory there is 
-                        * nothing else in the frame. KA9Q has an option to 
+                       /* Now we have several options: In theory there is
+                        * nothing else in the frame. KA9Q has an option to
                         * send data with the syn, BSD accepts data with the
-                        * syn up to the [to be] advertised window and 
-                        * Solaris 2.1 gives you a protocol error. For now 
-                        * we just ignore it, that fits the spec precisely 
+                        * syn up to the [to be] advertised window and
+                        * Solaris 2.1 gives you a protocol error. For now
+                        * we just ignore it, that fits the spec precisely
                         * and avoids incompatibilities. It would be nice in
                         * future to drop through and process the data.
                         *
-                        * Now that TTCP is starting to be used we ought to 
+                        * Now that TTCP is starting to be used we ought to
                         * queue this data.
                         * But, this leaves one open to an easy denial of
-                        * service attack, and SYN cookies can't defend
+                        * service attack, and SYN cookies can't defend
                         * against this problem. So, we drop the data
                         * in the interest of security over speed unless
                         * it's still in use.
@@ -4459,7 +4687,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        /* step 2: check RST bit */
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4482,7 +4710,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (th->ack) {
                int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-               switch(sk->sk_state) {
+               switch (sk->sk_state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
@@ -4609,7 +4837,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
                /* RFC 793 says to queue data in these states,
-                * RFC 1122 says we MUST send a reset. 
+                * RFC 1122 says we MUST send a reset.
                 * BSD 4.4 also does reset.
                 */
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
@@ -4621,7 +4849,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        }
                }
                /* Fall through */
-       case TCP_ESTABLISHED: 
+       case TCP_ESTABLISHED:
                tcp_data_queue(sk, skb);
                queued = 1;
                break;
@@ -4633,7 +4861,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                tcp_ack_snd_check(sk);
        }
 
-       if (!queued) { 
+       if (!queued) {
 discard:
                __kfree_skb(skb);
        }