/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.14.2.1 2002/03/05 04:30:08 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/config.h>
#include <linux/sysctl.h>
#include <net/inet_common.h>

#define SYNC_INIT 0 /* let the user enable it */

int sysctl_tcp_tw_recycle = 0;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;
int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow = 0;
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
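/* For illustration (not from the original source): with rcv_nxt == 100
 * and rcv_wnd == 50 the callers below pass the window [100,150).  A
 * segment with seq=90, end_seq=120 overlaps the window and is accepted;
 * a bare segment with seq == end_seq == 150 is also accepted, as a
 * zero-length probe at the right edge.
 */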
/* New-style handling of TIME_WAIT sockets. */
/* Must be called with locally disabled BHs. */
void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead;
	struct tcp_bind_hashbucket *bhead;
	struct tcp_bind_bucket *tb;

	/* Unlink from established hashes. */
	ehead = &tcp_ehash[tw->hashent];
	write_lock(&ehead->lock);
	if (!tw->pprev) {
		/* Not hashed; nothing to unlink. */
		write_unlock(&ehead->lock);
		return;
	}
	if (tw->next)
		tw->next->pprev = tw->pprev;
	*(tw->pprev) = tw->next;
	tw->pprev = NULL;
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
	spin_lock(&bhead->lock);
	tb = tw->tb;
	if (tw->bind_next)
		tw->bind_next->bind_pprev = tw->bind_pprev;
	*(tw->bind_pprev) = tw->bind_next;
	tw->tb = NULL;
	if (tb->owners == NULL) {
		/* Last owner gone: unlink and free the bind bucket. */
		if (tb->next)
			tb->next->pprev = tb->pprev;
		*(tb->pprev) = tb->next;
		kmem_cache_free(tcp_bucket_cachep, tb);
	}
	spin_unlock(&bhead->lock);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&tw->refcnt) != 1) {
		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
		       atomic_read(&tw->refcnt));
	}
#endif
	tcp_tw_put(tw);
}
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. This means that, strictly speaking, we must
 * spinlock it. I do not want to! The probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
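/* A note for orientation (not from the original source): the verdicts
 * returned below are consumed by the protocol receive path roughly as
 *	TCP_TW_SUCCESS - segment fully handled, nothing more to do,
 *	TCP_TW_ACK     - echo an ACK back to the peer,
 *	TCP_TW_RST     - answer with a reset,
 *	TCP_TW_SYN     - acceptable new SYN, reopen the connection.
 */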
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
			   struct tcphdr *th, unsigned len)
{
	struct tcp_opt tp;
	int paws_reject = 0;

	tp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2) && tw->ts_recent_stamp) {
		tcp_parse_options(skb, &tp, 0);

		if (tp.saw_tstamp) {
			tp.ts_recent = tw->ts_recent;
			tp.ts_recent_stamp = tw->ts_recent_stamp;
			paws_reject = tcp_paws_check(&tp, th->rst);
		}
	}
	if (tw->substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tw->rcv_nxt, tw->rcv_nxt + tw->rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->rcv_nxt))
			goto kill_with_rst;

		/* Duplicate ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			tcp_tw_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin || TCP_SKB_CB(skb)->end_seq != tw->rcv_nxt+1) {
kill_with_rst:
			tcp_tw_deschedule(tw);
			tcp_timewait_kill(tw);
			tcp_tw_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->substate = TCP_TIME_WAIT;
		tw->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tp.saw_tstamp) {
			tw->ts_recent_stamp = xtime.tv_sec;
			tw->ts_recent = tp.rcv_tsval;
		}
		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->family == AF_INET &&
		    sysctl_tcp_tw_recycle && tw->ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->timeout);
		else
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 * Now the real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */
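	/* A concrete reading of the rule above (illustration only): if the
	 * old incarnation last used sequence number 7, a fresh SYN must be
	 * answered with an ISN larger than 7; the TCP_TW_SYN path further
	 * below implements exactly this by deriving the ISN from snd_nxt.
	 */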
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tw->rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				tcp_tw_deschedule(tw);
				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tp.saw_tstamp) {
			tw->ts_recent = tp.rcv_tsval;
			tw->ts_recent_stamp = xtime.tv_sec;
		}

		tcp_tw_put(tw);
		return TCP_TW_SUCCESS;
	}
	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in
	   reply to our SYN-ACK), we must return the socket to the
	   time-wait state. It is not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tw->rcv_nxt) ||
	     (tp.saw_tstamp && (s32)(tw->ts_recent - tp.rcv_tsval) < 0))) {
		u32 isn = tw->snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}
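	/* Note on the ISN above (illustration, not from the original): it
	 * is placed beyond anything the old incarnation could have sent:
	 * snd_nxt plus the maximal unscaled window (65535), with the extra
	 * 2 presumably leaving room for SYN/FIN, which consume sequence
	 * space. This satisfies condition (1) of the RFC 1122 rule quoted
	 * above.
	 */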
	if (paws_reject)
		NET_INC_STATS_BH(PAWSEstabRejected);
	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	tcp_tw_put(tw);
	return TCP_TW_SUCCESS;
}
/* Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->hashent];
	struct tcp_bind_hashbucket *bhead;
	struct sock **head, *sktw;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with sk->num != 0 MUST be bound in binding
	   cache, even if it is closed.
	 */
	bhead = &tcp_bhash[tcp_bhashfn(sk->num)];
	spin_lock(&bhead->lock);
	tw->tb = (struct tcp_bind_bucket *)sk->prev;
	BUG_TRAP(sk->prev != NULL);
	if ((tw->bind_next = tw->tb->owners) != NULL)
		tw->tb->owners->bind_pprev = &tw->bind_next;
	tw->tb->owners = (struct sock *)tw;
	tw->bind_pprev = &tw->tb->owners;
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from established hash. */
	if (sk->pprev) {
		if (sk->next)
			sk->next->pprev = sk->pprev;
		*sk->pprev = sk->next;
		sk->pprev = NULL;
		sock_prot_dec_use(sk->prot);
	}

	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
	head = &(ehead + tcp_ehash_size)->chain;
	sktw = (struct sock *)tw;
	if ((sktw->next = *head) != NULL)
		(*head)->pprev = &sktw->next;
	*head = sktw;
	sktw->pprev = head;
	atomic_inc(&tw->refcnt);

	write_unlock(&ehead->lock);
}
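/* A sketch of the pprev convention used throughout these hash chains
 * (illustration only): pprev points at the field that points at us,
 * i.e. either the chain head or the previous node's next field, so a
 * node can unlink itself in O(1) without knowing its predecessor:
 *
 *	if (node->next)
 *		node->next->pprev = node->pprev;
 *	*node->pprev = node->next;
 *	node->pprev = NULL;
 */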
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct tcp_tw_bucket *tw = NULL;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int recycle_ok = 0;

	if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
	if (tw != NULL) {
		int rto = (tp->rto<<2) - (tp->rto>>1);
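		/* Worked example (illustration only): the shifts compute
		 * 3.5*RTO without a multiply: (rto<<2) - (rto>>1)
		 * = 4*rto - rto/2 = 3.5*rto; e.g. rto = 20 jiffies
		 * gives 80 - 10 = 70 jiffies.
		 */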
		/* Give us an identity. */
		tw->daddr = sk->daddr;
		tw->rcv_saddr = sk->rcv_saddr;
		tw->bound_dev_if = sk->bound_dev_if;
		tw->num = sk->num;
		tw->state = TCP_TIME_WAIT;
		tw->substate = state;
		tw->sport = sk->sport;
		tw->dport = sk->dport;
		tw->family = sk->family;
		tw->reuse = sk->reuse;
		tw->rcv_wscale = tp->rcv_wscale;
		atomic_set(&tw->refcnt, 1);

		tw->hashent = sk->hashent;
		tw->rcv_nxt = tp->rcv_nxt;
		tw->snd_nxt = tp->snd_nxt;
		tw->rcv_wnd = tcp_receive_window(tp);
		tw->ts_recent = tp->ts_recent;
		tw->ts_recent_stamp = tp->ts_recent_stamp;
		tw->pprev_death = NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->family == PF_INET6) {
			memcpy(&tw->v6_daddr,
			       &sk->net_pinfo.af_inet6.daddr,
			       sizeof(struct in6_addr));
			memcpy(&tw->v6_rcv_saddr,
			       &sk->net_pinfo.af_inet6.rcv_saddr,
			       sizeof(struct in6_addr));
		}
#endif
		/* Linkage updates. */
		__tcp_tw_hashdance(sk, tw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->timeout = rto;
		} else {
			tw->timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		tcp_tw_schedule(tw, timeo);
		tcp_tw_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot = 0;

static void tcp_twkill(unsigned long);

static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = { function: tcp_twkill };
static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
{
	struct tcp_tw_bucket *tw;
	int killed = 0;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)
		goto out;

	while ((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
		tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		tw->pprev_death = NULL;
		spin_unlock(&tw_death_lock);

		tcp_timewait_kill(tw);
		tcp_tw_put(tw);
		killed++;

		spin_lock(&tw_death_lock);
	}
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));

	if ((tcp_tw_count -= killed) != 0)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
	net_statistics[smp_processor_id()*2].TimeWaited += killed;
out:
	spin_unlock(&tw_death_lock);
}

SMP_TIMER_DEFINE(tcp_twkill, tcp_twkill_task);
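/* Sizing note (illustration, assuming the usual TCP_TIMEWAIT_LEN of
 * 60*HZ and TCP_TWKILL_SLOTS of 8): the reaper then fires every
 * TCP_TWKILL_PERIOD = 7.5 seconds and empties one slot per run, and
 * "(slot + 1) & (TCP_TWKILL_SLOTS - 1)" is a cheap modulo that relies
 * on the slot count being a power of 2.
 */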
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	spin_lock(&tw_death_lock);
	if (tw->pprev_death) {
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		*tw->pprev_death = tw->next_death;
		tw->pprev_death = NULL;
		tcp_tw_put(tw);
		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);
	}
	spin_unlock(&tw_death_lock);
}
/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer = { function: tcp_twcal_tick };
static struct tcp_tw_bucket *tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
	struct tcp_tw_bucket **tpp;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent
	 * retransmitted FINs (or previous segments) are lost, the
	 * probability of such an event is p^(N+1), where p is the
	 * probability of losing a single packet, and the time to detect
	 * the loss is about RTO*(2^N - 1) with exponential backoff.
	 * The normal timewait length is calculated so that we wait at
	 * least for one retransmitted FIN (the maximal RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by
	 *   waiting only 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8) ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to the bounds
	 * required by RTO, rather than MSL. So, if the peer understands
	 * PAWS, we kill the tw bucket after 3.5*RTO (it is important
	 * that this number is greater than the TS tick!) and detect old
	 * duplicates with the help of PAWS.
	 */
	slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
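	/* Worked example (illustration only): the line above is a ceiling
	 * division by the tick size done with shifts. If a recycle tick
	 * were 1<<TCP_TW_RECYCLE_TICK = 32 jiffies, then timeo = 100
	 * gives slot = (100 + 31) >> 5 = 4, i.e. ceil(100/32).
	 */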
	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (tw->pprev_death) {
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		*tw->pprev_death = tw->next_death;
		tw->pprev_death = NULL;
		tcp_tw_count--;
	} else
		atomic_inc(&tw->refcnt);

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS-1;
		} else {
			slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS-1;
		}
		tw->ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		tpp = &tcp_tw_death_row[slot];
	} else {
		tw->ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			/* Calendar is idle: start it at the current jiffy. */
			tcp_twcal_hand = 0;
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);
		} else {
			if ((long)(tcp_twcal_timer.expires - jiffies) > (slot<<TCP_TW_RECYCLE_TICK))
				mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot) & (TCP_TW_RECYCLE_SLOTS-1);
		}
		tpp = &tcp_twcal_row[slot];
	}

	if ((tw->next_death = *tpp) != NULL)
		(*tpp)->pprev_death = &tw->next_death;
	*tpp = tw;
	tw->pprev_death = tpp;

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
}
void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
{
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)
		goto out;

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n = 0; n < TCP_TW_RECYCLE_SLOTS; n++) {
		if ((long)(j - now) <= 0) {
			struct tcp_tw_bucket *tw;

			while ((tw = tcp_twcal_row[slot]) != NULL) {
				tcp_twcal_row[slot] = tw->next_death;
				tw->pprev_death = NULL;

				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				tcp_twcal_jiffie = j;
				tcp_twcal_hand = slot;
			}

			if (tcp_twcal_row[slot] != NULL) {
				mod_timer(&tcp_twcal_timer, j);
				goto out;
			}
		}
		j += (1<<TCP_TW_RECYCLE_TICK);
		slot = (slot+1) & (TCP_TW_RECYCLE_SLOTS-1);
	}
	tcp_twcal_hand = -1;

out:
	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	net_statistics[smp_processor_id()*2].TimeWaitKilled += killed;
	spin_unlock(&tw_death_lock);
}

SMP_TIMER_DEFINE(tcp_twcal_tick, tcp_twcal_tasklet);
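/* Shape of the calendar above (illustration only): TCP_TW_RECYCLE_SLOTS
 * buckets, each spanning 1<<TCP_TW_RECYCLE_TICK jiffies. A bucket due
 * t ticks from now is linked into slot (hand + t) mod SLOTS; each tick
 * the hand advances and the expired slot's whole chain is destroyed,
 * which is what keeps recycled TIME-WAIT cheap.
 */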
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of the memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
{
	struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0);

	if (newsk != NULL) {
		struct tcp_opt *newtp;
		struct sk_filter *filter;

		memcpy(newsk, sk, sizeof(*newsk));
		newsk->state = TCP_SYN_RECV;

		/* Clone the TCP header template */
		newsk->dport = req->rmt_port;

		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		newsk->dst_lock = RW_LOCK_UNLOCKED;
		atomic_set(&newsk->rmem_alloc, 0);
		skb_queue_head_init(&newsk->receive_queue);
		atomic_set(&newsk->wmem_alloc, 0);
		skb_queue_head_init(&newsk->write_queue);
		atomic_set(&newsk->omem_alloc, 0);
		newsk->wmem_queued = 0;
		newsk->forward_alloc = 0;

		newsk->userlocks = sk->userlocks & ~SOCK_BINDPORT_LOCK;
		newsk->backlog.head = newsk->backlog.tail = NULL;
		newsk->callback_lock = RW_LOCK_UNLOCKED;
		skb_queue_head_init(&newsk->error_queue);
		newsk->write_space = tcp_write_space;

		if ((filter = newsk->filter) != NULL)
			sk_filter_charge(newsk, filter);
		/* Now setup tcp_opt */
		newtp = &(newsk->tp_pinfo.af_tcp);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = req->rcv_isn + 1;
		newtp->snd_nxt = req->snt_isn + 1;
		newtp->snd_una = req->snt_isn + 1;
		newtp->snd_sml = req->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);

		newtp->retransmits = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newtp->rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;
		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->send_head = NULL;
		newtp->rcv_wup = req->rcv_isn + 1;
		newtp->write_seq = req->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = req->rcv_isn + 1;

		newtp->saw_tstamp = 0;

		newtp->eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->num_sacks = 0;

		newtp->listen_opt = NULL;
		newtp->accept_queue = newtp->accept_queue_tail = NULL;
		/* Deinitialize syn_wait_lock to trap illegal accesses. */
		memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
		/* Back to base struct sock members. */
		atomic_set(&newsk->refcnt, 2);
#ifdef INET_REFCNT_DEBUG
		atomic_inc(&inet_sock_nr);
#endif
		atomic_inc(&tcp_sockets_allocated);

		if (newsk->keepopen)
			tcp_reset_keepalive_timer(newsk,
						  keepalive_time_when(newtp));
		newsk->socket = NULL;
		newtp->tstamp_ok = req->tstamp_ok;
		if ((newtp->sack_ok = req->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->wscale_ok = req->wscale_ok;
		if (newtp->wscale_ok) {
			newtp->snd_wscale = req->snd_wscale;
			newtp->rcv_wscale = req->rcv_wscale;
		} else {
			newtp->snd_wscale = newtp->rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
		newtp->max_window = newtp->snd_wnd;
		if (newtp->tstamp_ok) {
			newtp->ts_recent = req->ts_recent;
			newtp->ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
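		/* Size check (illustration only): with timestamps the fixed
		 * 20-byte TCP header grows by TCPOLEN_TSTAMP_ALIGNED (12
		 * bytes: a 10-byte option padded with two NOPs), giving a
		 * 32-byte header on every segment of the connection.
		 */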
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(TcpPassiveOpens);
	}
	return newsk;
}
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as an open_request.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct open_request *req,
			   struct open_request **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_opt ttp;
	struct sock *child;

	ttp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &ttp, 0);

		if (ttp.saw_tstamp) {
			ttp.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&ttp, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->class->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If tp->defer_accept is set, we silently drop this bare ACK.
	   Otherwise, we create an established connection. Both ends
	   (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The probability is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   of a SYN-ACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
		return sk;
	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->class->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(PAWSEstabRejected);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
		req->ts_recent = ttp.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at req->rcv_isn+1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	    "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;
	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
		req->acked = 1;
		return NULL;
	}
	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	tcp_synq_unlink(tp, req, prev);
	tcp_synq_removed(sk, req);

	tcp_acceptq_queue(sk, req, child);
	return child;
listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		tp->defer_accept = 0;
		req->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(EmbryonicRsts);
	if (!(flg & TCP_FLAG_RST))
		req->class->send_reset(skb);

	tcp_synq_drop(sk, req, prev);
	return NULL;
}
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->state;

	if (child->lock.users == 0) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->state != state)
			parent->data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
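/* Usage note (illustration, not from the original source): the address
 * family receive paths call this right after accepting a child from
 * tcp_check_req(), roughly as
 *
 *	if (tcp_child_process(sk, nsk, skb))
 *		goto reset;
 *
 * i.e. a nonzero return, propagated from tcp_rcv_state_process(),
 * asks the caller to send a reset.
 */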