 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
#define FASTRETRANS_DEBUG 1

/* Cancel timers when they are not required. */
#undef TCP_CLEAR_TIMERS

#include <linux/config.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <net/checksum.h>
/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct tcp_ehash_bucket {
	rwlock_t	lock;
	struct sock	*chain;
} __attribute__((__aligned__(8)));

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific sk->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit; if it is set and the socket trying to bind has
 * sk->reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)  -DaveM
 */
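/* Illustrative sketch (not part of this header, names are assumptions):
 * how the "fastreuse" bit described above can be maintained when a socket
 * is added to a bind bucket, and how a later bind request can then be
 * answered without walking the owners list at all.
 */
#if 0
static inline void example_bind_bucket_add(struct tcp_bind_bucket *tb,
					   struct sock *newsk)
{
	/* One socket failing the test clears the bit for the whole bucket. */
	if (!(newsk->reuse && newsk->state != TCP_LISTEN))
		tb->fastreuse = 0;
}

static inline int example_verify_bind_fast(struct tcp_bind_bucket *tb,
					   struct sock *sk)
{
	/* Fast path: every owner passed the test above and the new socket
	 * has SO_REUSEADDR set, so the port may be shared in O(1).
	 */
	return tb->fastreuse > 0 && sk->reuse;
}
#endif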
struct tcp_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct tcp_bind_bucket	*next;
	struct sock		*owners;
	struct tcp_bind_bucket	**pprev;
};

struct tcp_bind_hashbucket {
	spinlock_t		lock;
	struct tcp_bind_bucket	*chain;
};
extern struct tcp_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->state < TCP_CLOSE
	 *
	 * First half of the table is for sockets not in TIME_WAIT, second half
	 * is for TIME_WAIT sockets only.
	 */
	struct tcp_ehash_bucket *__tcp_ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct tcp_bind_hashbucket *__tcp_bhash;

	int __tcp_bhash_size;
	int __tcp_ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct sock *__tcp_listening_hash[TCP_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirtied.
	 */
	rwlock_t __tcp_lhash_lock ____cacheline_aligned;
	atomic_t __tcp_lhash_users;
	wait_queue_head_t __tcp_lhash_wait;
	spinlock_t __tcp_portalloc_lock;
} tcp_hashinfo;
#define tcp_ehash	(tcp_hashinfo.__tcp_ehash)
#define tcp_bhash	(tcp_hashinfo.__tcp_bhash)
#define tcp_ehash_size	(tcp_hashinfo.__tcp_ehash_size)
#define tcp_bhash_size	(tcp_hashinfo.__tcp_bhash_size)
#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
#define tcp_lhash_lock	(tcp_hashinfo.__tcp_lhash_lock)
#define tcp_lhash_users	(tcp_hashinfo.__tcp_lhash_users)
#define tcp_lhash_wait	(tcp_hashinfo.__tcp_lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
extern kmem_cache_t *tcp_bucket_cachep;
extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
						 unsigned short snum);
extern void tcp_bucket_unlock(struct sock *sk);
extern int tcp_port_rover;
extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);

/* These are AF independent. */
static __inline__ int tcp_bhashfn(__u16 lport)
{
	return (lport & (tcp_bhash_size - 1));
}
/* This is a TIME_WAIT bucket.  It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
struct tcp_tw_bucket {
	/* These _must_ match the beginning of struct sock precisely.
	 * XXX Yes I know this is gross, but I'd have to edit every single
	 * XXX networking file if I created a "struct sock_header". -DaveM
	 */
	struct sock		*bind_next;
	struct sock		**bind_pprev;
				substate;	/* "zapped" is replaced with "substate" */
	unsigned short		family;
				rcv_wscale;	/* It is also TW bucket specific */

	/* And these are ours. */
	long			ts_recent_stamp;
	struct tcp_bind_bucket	*tb;
	struct tcp_tw_bucket	*next_death;
	struct tcp_tw_bucket	**pprev_death;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct in6_addr		v6_daddr;
	struct in6_addr		v6_rcv_saddr;
#endif
};
extern kmem_cache_t *tcp_timewait_cachep;

static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
	if (atomic_dec_and_test(&tw->refcnt)) {
#ifdef INET_REFCNT_DEBUG
		printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
		kmem_cache_free(tcp_timewait_cachep, tw);
	}
}

extern atomic_t tcp_orphan_count;
extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport)<<16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport)<<16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&((__sk)->daddr)))== (__cookie)) && \
	 ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->daddr == (__saddr)) && \
	 ((__sk)->rcv_saddr == (__daddr)) && \
	 ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#endif /* 64-bit arch */

#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
	(((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
	 ((__sk)->family == AF_INET6) && \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr)) && \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr)) && \
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
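/* Illustrative sketch (an assumption, not the real established-hash lookup):
 * how the macros above are meant to be used together when walking one ehash
 * chain.  On 64-bit machines TCP_V4_ADDR_COOKIE lets the whole address pair
 * be compared with a single 64-bit load; the combined ports are one word.
 */
#if 0
static inline struct sock *example_v4_lookup(struct tcp_ehash_bucket *head,
					     u32 saddr, u16 sport,
					     u32 daddr, u16 dport, int dif)
{
	__u32 ports = TCP_COMBINED_PORTS(sport, dport);
	struct sock *sk;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)

	for (sk = head->chain; sk != NULL; sk = sk->next)
		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
			return sk;	/* full identity match */
	return NULL;
}
#endif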
/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
{
	return num & (TCP_LHTABLE_SIZE - 1);
}

static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
	return tcp_lhashfn(sk->num);
}
#define MAX_TCP_HEADER	(128 + MAX_HEADER)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */
#define TCP_TIMEWAIT_LEN (60*HZ)	/* how long to wait to destroy TIME-WAIT
					 * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
					/* BSD style FIN_WAIT2 deadlock breaker.
					 * It used to be 3min, new value is 60sec,
					 * to combine FIN-WAIT-2 timeout with
					 * TIME-WAIT timer.
					 */

#if HZ >= 100
#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif

#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))	/* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127
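/* Worked example with the defaults above: a connection that stays idle for
 * TCP_KEEPALIVE_TIME (2 hours) starts probing; with TCP_KEEPALIVE_PROBES
 * probes spaced TCP_KEEPALIVE_INTVL apart, a dead peer is declared roughly
 * 2h + 9*75s, i.e. about 2 hours 11 minutes after the last activity.  The
 * MAX_TCP_* values above bound what setsockopt() will accept per socket.
 */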
/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
#define TCP_TW_RECYCLE_SLOTS_LOG	5
#define TCP_TW_RECYCLE_SLOTS		(1<<TCP_TW_RECYCLE_SLOTS_LOG)

/* If time > 4sec, it is the "slow" path and no recycling is required,
 * so we select the tick so that the slots span a range of about 4 seconds.
 */
#if HZ <= 16 || HZ > 4096
# error Unsupported: HZ <= 16 or HZ > 4096
#elif HZ <= 32
# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 64
# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 128
# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 256
# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 512
# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 1024
# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 2048
# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
#else
# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
#endif
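/* Worked example (illustrative, assuming the HZ<=128 branch above): with
 * HZ=100, TCP_TW_RECYCLE_TICK = 7+2-5 = 4, so one recycle slot covers
 * 1<<4 = 16 jiffies (~160ms), and the TCP_TW_RECYCLE_SLOTS = 32 slots
 * together span 512 jiffies (~5s) -- the "about 4 seconds" range mentioned
 * above, rounded up to a power of two.
 */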
#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
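/* Example of what the "aligned" sizes mean on the wire: timestamps are
 * emitted as NOP(1) NOP(1) TIMESTAMP(kind 8, len 10) TSval TSecr, i.e.
 * TCPOLEN_TSTAMP_ALIGNED = 12 bytes, so the option area stays 32-bit
 * aligned; SACK-permitted likewise goes out as NOP NOP SACK_PERM(2),
 * i.e. TCPOLEN_SACKPERM_ALIGNED = 4 bytes.
 */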
#define TCP_TIME_RETRANS	1	/* Retransmit timer */
#define TCP_TIME_DACK		2	/* Delayed ack timer */
#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */

/* sysctl variables for tcp */
extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_max_tw_buckets;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
struct or_calltable {
	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
	void (*destructor)	(struct open_request *req);
	void (*send_reset)	(struct sk_buff *skb);
};

struct tcp_v4_open_req {
	struct ip_options	*opt;
};

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct tcp_v6_open_req {
	struct in6_addr		loc_addr;
	struct in6_addr		rmt_addr;
	struct sk_buff		*pktopts;
};
#endif

/* this structure is too big */
struct open_request {
	struct open_request	*dl_next;	/* Must be first member! */
	__u16			snd_wscale : 4,
	/* The following two fields can be easily recomputed I think -AK */
	__u32			window_clamp;	/* window clamp at creation time */
	__u32			rcv_wnd;	/* rcv_wnd offered first time */
	unsigned long		expires;
	struct or_calltable	*class;
	struct tcp_v4_open_req	v4_req;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	struct tcp_v6_open_req	v6_req;
#endif
};

/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;

#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)

static inline void tcp_openreq_free(struct open_request *req)
{
	req->class->destructor(req);
	tcp_openreq_fastfree(req);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define TCP_INET_FAMILY(fam) 1
#endif
/*
 *	Pointers to address related TCP functions
 *	(i.e. things that depend on the address family)
 *
 *	BUGGG_FUTURE: the whole idea behind this struct is wrong.
 *	It mixes socket frontend with transport function.
 *	With port sharing between IPv6/v4 its only advantage is that
 *	poor IPv6 needs to permanently recheck that it
 *	is still IPv6 8)8) It must be cleaned up as soon as possible.
 */
struct tcp_func {
	int		(*queue_xmit)		(struct sk_buff *skb);

	void		(*send_check)		(struct sock *sk,
						 struct tcphdr *th,
						 int len,
						 struct sk_buff *skb);

	int		(*rebuild_header)	(struct sock *sk);

	int		(*conn_request)		(struct sock *sk,
						 struct sk_buff *skb);

	struct sock *	(*syn_recv_sock)	(struct sock *sk,
						 struct sk_buff *skb,
						 struct open_request *req,
						 struct dst_entry *dst);

	int		(*remember_stamp)	(struct sock *sk);

	__u16		net_header_len;

	int		(*setsockopt)		(struct sock *sk,
	int		(*getsockopt)		(struct sock *sk,
	void		(*addr2sockaddr)	(struct sock *sk,
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

extern __inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
extern __inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}
/* is s2<=s1<=s3 ? */
extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
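/* Example of the wraparound behaviour (illustrative only): with
 * seq1 = 0xfffffff0 and seq2 = 0x00000010 the difference seq1 - seq2 is
 * 0xffffffe0, which is negative as an __s32, so before(seq1, seq2) is true
 * even though seq1 is numerically larger -- the sequence space has simply
 * wrapped.  Likewise between(0x5, 0xfffffff0, 0x10) is true, since 0x5
 * lies inside the wrapped interval [0xfffffff0, 0x10].
 */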
extern struct proto tcp_prot;

extern struct tcp_mib tcp_statistics[NR_CPUS*2];
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)

extern void	tcp_put_port(struct sock *sk);
extern void	__tcp_put_port(struct sock *sk);
extern void	tcp_inherit_port(struct sock *sk, struct sock *child);

extern void	tcp_v4_err(struct sk_buff *skb, u32);

extern void	tcp_shutdown(struct sock *sk, int how);

extern int	tcp_v4_rcv(struct sk_buff *skb);

extern int	tcp_v4_remember_stamp(struct sock *sk);

extern int	tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);

extern int	tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
extern ssize_t	tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int	tcp_ioctl(struct sock *sk,
extern int	tcp_rcv_state_process(struct sock *sk,
extern int	tcp_rcv_established(struct sock *sk,
static inline void tcp_schedule_ack(struct tcp_opt *tp)
{
	tp->ack.pending |= TCP_ACK_SCHED;
}
static inline int tcp_ack_scheduled(struct tcp_opt *tp)
{
	return tp->ack.pending&TCP_ACK_SCHED;
}

static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
{
	if (tp->ack.quick && --tp->ack.quick == 0) {
		/* Leaving quickack mode we deflate ATO. */
		tp->ack.ato = TCP_ATO_MIN;
	}
}
extern void tcp_enter_quickack_mode(struct tcp_opt *tp);

static __inline__ void tcp_delack_init(struct tcp_opt *tp)
{
	memset(&tp->ack, 0, sizeof(tp->ack));
}
static inline void tcp_clear_options(struct tcp_opt *tp)
{
	tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
}
extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,

extern struct sock *	tcp_check_req(struct sock *sk, struct sk_buff *skb,
				      struct open_request *req,
				      struct open_request **prev);
extern int		tcp_child_process(struct sock *parent,
					  struct sk_buff *skb);
extern void		tcp_enter_frto(struct sock *sk);
extern void		tcp_enter_loss(struct sock *sk, int how);
extern void		tcp_clear_retrans(struct tcp_opt *tp);
extern void		tcp_update_metrics(struct sock *sk);

extern void		tcp_close(struct sock *sk,
extern struct sock *	tcp_accept(struct sock *sk, int flags, int *err);
extern unsigned int	tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern void		tcp_write_space(struct sock *sk);

extern int		tcp_getsockopt(struct sock *sk, int level,
				       int optname, char *optval,
extern int		tcp_setsockopt(struct sock *sk, int level,
				       int optname, char *optval,
extern void		tcp_set_keepalive(struct sock *sk, int val);
extern int		tcp_recvmsg(struct sock *sk,
				    int len, int nonblock,
				    int flags, int *addr_len);

extern int		tcp_listen_start(struct sock *sk);

extern void		tcp_parse_options(struct sk_buff *skb,
/*
 *	TCP v4 functions exported for the inet6 API
 */

extern int		tcp_v4_rebuild_header(struct sock *sk);

extern int		tcp_v4_build_header(struct sock *sk,
					    struct sk_buff *skb);

extern void		tcp_v4_send_check(struct sock *sk,
					  struct tcphdr *th, int len,
					  struct sk_buff *skb);

extern int		tcp_v4_conn_request(struct sock *sk,
					    struct sk_buff *skb);

extern struct sock *	tcp_create_openreq_child(struct sock *sk,
						 struct open_request *req,
						 struct sk_buff *skb);

extern struct sock *	tcp_v4_syn_recv_sock(struct sock *sk,
					     struct open_request *req,
					     struct dst_entry *dst);

extern int		tcp_v4_do_rcv(struct sock *sk,
				      struct sk_buff *skb);

extern int		tcp_v4_connect(struct sock *sk,
				       struct sockaddr *uaddr,

extern int		tcp_connect(struct sock *sk);

extern struct sk_buff *	tcp_make_synack(struct sock *sk,
					struct dst_entry *dst,
					struct open_request *req);

extern int		tcp_disconnect(struct sock *sk, int flags);

extern void		tcp_unhash(struct sock *sk);

extern int		tcp_v4_hash_connecting(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
extern int	tcp_write_xmit(struct sock *, int nonagle);
extern int	tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void	tcp_xmit_retransmit_queue(struct sock *);
extern void	tcp_simple_retransmit(struct sock *);

extern void	tcp_send_probe0(struct sock *);
extern void	tcp_send_partial(struct sock *);
extern int	tcp_write_wakeup(struct sock *);
extern void	tcp_send_fin(struct sock *sk);
extern void	tcp_send_active_reset(struct sock *sk, int priority);
extern int	tcp_send_synack(struct sock *);
extern int	tcp_transmit_skb(struct sock *, struct sk_buff *);
extern void	tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
extern void	tcp_push_one(struct sock *, unsigned mss_now);
extern void	tcp_send_ack(struct sock *sk);
extern void	tcp_send_delayed_ack(struct sock *sk);

extern void	tcp_init_xmit_timers(struct sock *);
extern void	tcp_clear_xmit_timers(struct sock *);

extern void	tcp_delete_keepalive_timer(struct sock *);
extern void	tcp_reset_keepalive_timer(struct sock *, unsigned long);
extern int	tcp_sync_mss(struct sock *sk, u32 pmtu);

extern const char timer_bug_msg[];

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
#ifdef TCP_CLEAR_TIMERS
		if (timer_pending(&tp->retransmit_timer) &&
		    del_timer(&tp->retransmit_timer))
#ifdef TCP_CLEAR_TIMERS
		if (timer_pending(&tp->delack_timer) &&
		    del_timer(&tp->delack_timer))
		printk(timer_bug_msg);
/*
 *	Reset the retransmission timer
 */
static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (when > TCP_RTO_MAX) {
		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());

	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->timeout = jiffies+when;
		if (!mod_timer(&tp->retransmit_timer, tp->timeout))

		tp->ack.pending |= TCP_ACK_TIMER;
		tp->ack.timeout = jiffies+when;
		if (!mod_timer(&tp->delack_timer, tp->ack.timeout))

		printk(KERN_DEBUG "bug: unknown timer value\n");
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
static __inline__ unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss_now = tp->mss_cache;

	if (dst && dst->pmtu != tp->pmtu_cookie)
		mss_now = tcp_sync_mss(sk, dst->pmtu);

	if (tp->eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
	return mss_now;
}
/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We don't have any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate it:
 * overestimating makes us ACK less frequently than needed,
 * while underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	unsigned int hint = min(tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	tp->ack.rcv_mss = hint;
}
static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
	    atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
		tcp_fast_path_on(tp);
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32 bits, and this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We deliberately
 * use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines; if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid if URG flag is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
#define for_retrans_queue(skb, sk, tp) \
		for (skb = (sk)->write_queue.next;			\
		     (skb != (tp)->send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->write_queue);	\
		     skb = skb->next)

#include <net/tcp_ecn.h>
/*
 *	Compute minimal free write space needed to queue new packets.
 */
static inline int tcp_min_write_space(struct sock *sk)
{
	return sk->wmem_queued/2;
}

static inline int tcp_wspace(struct sock *sk)
{
	return sk->sndbuf - sk->wmem_queued;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
{
	return tp->packets_out - tp->left_out + tp->retrans_out;
}
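/* Worked example (numbers are illustrative only): with packets_out = 10,
 * sacked_out = 3 and lost_out = 1, tcp_sync_left_out() below gives
 * left_out = 4; if retrans_out = 2, packets_in_flight = 10 - 4 + 2 = 8,
 * i.e. eight segments are still assumed to be consuming network capacity.
 */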
/* Recalculate snd_ssthresh, we want to set it to:
 *
 *	one half the current congestion window, but no
 *	less than two segments
 */
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
	return max(tp->snd_cwnd >> 1U, 2U);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the rate halving phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
{
	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	return max(tp->snd_ssthresh,
		   ((tp->snd_cwnd >> 1) +
		    (tp->snd_cwnd >> 2)));
}

static inline void tcp_sync_left_out(struct tcp_opt *tp)
{
	if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}
extern void tcp_cwnd_application_limited(struct sock *sk);

/* Congestion window validation. (RFC2861) */

static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
{
	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fully fed. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
			tcp_cwnd_application_limited(sk);
	}
}
/* Set the slow start threshold and cwnd without falling back into slow start */
static inline void __tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->undo_marker = 0;
	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + 1U);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

static inline void tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->prior_ssthresh = 0;
	if (tp->ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(tp);
		tp->ca_state = TCP_CA_CWR;
	}
}
1152 /* Slow start with delack produces 3 packets of burst, so that
1153 * it is safe "de facto".
1155 static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
1160 static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
1162 return after(tp->snd_sml,tp->snd_una) &&
1163 !after(tp->snd_sml, tp->snd_nxt);
1166 static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
1169 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
/* Return 0 if the packet can be sent now without violating Nagle's rules:
 *   1. It is full sized.
 *   2. Or it contains FIN.
 *   3. Or TCP_NODELAY was set.
 *   4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *      With Minshall's modification: all sent small packets are ACKed.
 */

static __inline__ int
tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
	return (skb->len < mss_now &&
		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
		 tcp_minshall_check(tp))));
/* This checks if the data bearing packet SKB (usually tp->send_head)
 * should be put on the wire right now.
 */
static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
				   unsigned cur_mss, int nonagle)
{
	/*	RFC 1122 - section 4.2.3.4
	 *
	 *	We must queue if:
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) There are packets in flight and we have a small segment
	 *	   [SWS avoidance and Nagle algorithm]
	 *	   (part of SWS is done on packetization)
	 *	   Minshall's version reads: there are no _small_
	 *	   segments in flight. (tcp_nagle_check)
	 *	c) We have too many packets 'in flight'
	 *
	 *	Don't use the nagle rule for urgent data (or
	 *	for the final FIN -DaveM).
	 *
	 *	Also, the Nagle rule does not apply to frames which
	 *	sit in the middle of the queue (they have no chance
	 *	to get new data) and if the room at the tail of the skb
	 *	is not enough to save anything substantial (<32 for now).
	 */

	/* Don't be strict about the congestion window for the
	 * final FIN frame.  -DaveM
	 */
	return ((nonagle==1 || tp->urg_mode
		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
		((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}
static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
{
	if (!tp->packets_out && !tp->pending)
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}

static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff*)&sk->write_queue);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
	struct sk_buff *skb = tp->send_head;

		if (!tcp_skb_is_last(sk, skb))
		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
		    tcp_write_xmit(sk, nonagle))
			tcp_check_probe_timer(sk, tp);
	tcp_cwnd_validate(sk, tp);

static __inline__ void tcp_push_pending_frames(struct sock *sk,
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), tp->nonagle);

static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
	struct sk_buff *skb = tp->send_head;

		tcp_snd_test(tp, skb, tcp_current_mss(sk),
			     tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)

static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)

extern void tcp_destroy_sock(struct sock *sk);

/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}
/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting.  Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere.  Latency?  Burstiness?  Well, at least now we will
 * see why it failed. 8)8)  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->rcvbuf) {
			struct sk_buff *skb1;

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(TCPPrequeueDropped);

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sleep);
			if (!tcp_ack_scheduled(tp))
				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};

static __inline__ void tcp_set_state(struct sock *sk, int state)
	int oldstate = sk->state;

	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TcpCurrEstab);

		sk->prot->unhash(sk);
		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))

		if (oldstate==TCP_ESTABLISHED)
			tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */

	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
static __inline__ void tcp_done(struct sock *sk)
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->shutdown = SHUTDOWN_MASK;

		sk->state_change(sk);
		tcp_destroy_sock(sk);

static __inline__ void tcp_sack_reset(struct tcp_opt *tp)

static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
	if (tp->tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->ts_recent);
	if (tp->eff_sacks) {
		struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
		*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
					  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered.  Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible.  We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
static inline void tcp_select_initial_window(int __space, __u32 mss,
					     __u32 *window_clamp,
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);

		/* See RFC1323 for an explanation of the limit to 14 */
		while (space > 65535 && (*rcv_wscale) < 14) {
	if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
	    space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)

	/* Set the initial window to a value large enough for senders
	 * following RFC 2414; senders not following that RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
	else if (mss > 1460)
	if (*rcv_wnd > init_cwnd*mss)
		*rcv_wnd = init_cwnd*mss;

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}
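/* Worked example (illustrative): with sysctl_tcp_adv_win_scale = 2 (the
 * usual default), tcp_win_from_space() reserves space/4 as application
 * buffer and reports the remaining 3/4 of the receive buffer as window;
 * a negative scale of -2 would instead report only space/4 as window.
 */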
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(struct sock *sk)
{
	return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
}

static inline int tcp_full_space(struct sock *sk)
{
	return tcp_win_from_space(sk->rcvbuf);
}
static inline void tcp_acceptq_removed(struct sock *sk)

static inline void tcp_acceptq_added(struct sock *sk)

static inline int tcp_acceptq_is_full(struct sock *sk)
{
	return sk->ack_backlog > sk->max_ack_backlog;
}

static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	tcp_acceptq_added(sk);

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
		tp->accept_queue_tail->dl_next = req;
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
struct tcp_listen_opt
{
	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
};

static inline void tcp_synq_removed(struct sock *sk, struct open_request *req)
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (--lopt->qlen == 0)
		tcp_delete_keepalive_timer(sk);
	if (req->retrans == 0)

static inline void tcp_synq_added(struct sock *sk)
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);

static inline int tcp_synq_len(struct sock *sk)
{
	return sk->tp_pinfo.af_tcp.listen_opt->qlen;
}
static inline int tcp_synq_young(struct sock *sk)
{
	return sk->tp_pinfo.af_tcp.listen_opt->qlen_young;
}
static inline int tcp_synq_is_full(struct sock *sk)
{
	return tcp_synq_len(sk) >> sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log;
}
static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
				   struct open_request **prev)
{
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				 struct open_request **prev)
{
	tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
}

static __inline__ void tcp_openreq_init(struct open_request *req,
					struct tcp_opt *tp,
					struct sk_buff *skb)
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = tp->mss_clamp;
	req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
	req->tstamp_ok = tp->tstamp_ok;
	req->sack_ok = tp->sack_ok;
	req->snd_wscale = tp->snd_wscale;
	req->wscale_ok = tp->wscale_ok;
	req->rmt_port = skb->h.th->source;
#define TCP_MEM_QUANTUM ((int)PAGE_SIZE)

static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
	sk->tp_pinfo.af_tcp.queue_shrunk = 1;
	sk->wmem_queued -= skb->truesize;
	sk->forward_alloc += skb->truesize;

static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->wmem_queued += skb->truesize;
	sk->forward_alloc -= skb->truesize;
}

extern void __tcp_mem_reclaim(struct sock *sk);
extern int tcp_mem_schedule(struct sock *sk, int size, int kind);

static inline void tcp_mem_reclaim(struct sock *sk)
{
	if (sk->forward_alloc >= TCP_MEM_QUANTUM)
		__tcp_mem_reclaim(sk);
}
static inline void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(TCPMemoryPressures);
		tcp_memory_pressure = 1;
	}
}

static inline void tcp_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
		sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
		sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
	struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);

		skb->truesize += mem;
		if (sk->forward_alloc >= (int)skb->truesize ||
		    tcp_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, MAX_TCP_HEADER);

	tcp_enter_memory_pressure();
	tcp_moderate_sndbuf(sk);

static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
	return tcp_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *tcp_alloc_page(struct sock *sk)
	if (sk->forward_alloc >= (int)PAGE_SIZE ||
	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
		struct page *page = alloc_pages(sk->allocation, 0);

	tcp_enter_memory_pressure();
	tcp_moderate_sndbuf(sk);

static inline void tcp_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
		tcp_free_skb(sk, skb);
	tcp_mem_reclaim(sk);
}
1756 tcp_mem_reclaim(sk);
1759 extern void tcp_rfree(struct sk_buff *skb);
1761 static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
1764 skb->destructor = tcp_rfree;
1765 atomic_add(skb->truesize, &sk->rmem_alloc);
1766 sk->forward_alloc -= skb->truesize;
1769 extern void tcp_listen_wlock(void);
1771 /* - We may sleep inside this lock.
1772 * - If sleeping is not required (or called from BH),
1773 * use plain read_(un)lock(&tcp_lhash_lock).
1776 static inline void tcp_listen_lock(void)
1778 /* read_lock synchronizes to candidates to writers */
1779 read_lock(&tcp_lhash_lock);
1780 atomic_inc(&tcp_lhash_users);
1781 read_unlock(&tcp_lhash_lock);
1784 static inline void tcp_listen_unlock(void)
1786 if (atomic_dec_and_test(&tcp_lhash_users))
1787 wake_up(&tcp_lhash_wait);
static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(struct tcp_opt *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(struct tcp_opt *tp)
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
		fin_timeout = (tp->rto<<2) - (tp->rto>>1);
static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
	if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake.  It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer reboots,
	   its clock may go out-of-sync and half-open connections will not
	   be reset.  Actually, the problem would not exist if all
	   implementations followed the draft about maintaining clocks
	   across reboots.  Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * it to be sent.
	 */
	return (sysctl_tcp_frto && tp->send_head &&
		!after(TCP_SKB_CB(tp->send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}