/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H
#include <linux/config.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/in.h>		/* struct sockaddr_in */
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/in6.h>		/* struct sockaddr_in6 */
#include <linux/ipv6.h>		/* dest_cache, inet6_options */
#include <linux/icmpv6.h>
#include <net/if_inet6.h>	/* struct ipv6_mc_socklist */
#endif
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
#include <linux/icmp.h>
#endif
#include <linux/tcp.h>		/* struct tcphdr */
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <net/protocol.h>	/* struct inet_protocol */
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <net/x25.h>
#endif

#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
#include <linux/if_wanpipe.h>
#endif

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#include <net/ax25.h>
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
#include <net/netrom.h>
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
#include <net/rose.h>
#endif
#endif

#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
#include <linux/if_pppox.h>
#include <linux/ppp_channel.h>	/* struct ppp_channel */
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
#include <net/spx.h>
#else
#include <net/ipx.h>
#endif /* CONFIG_SPX */
#endif /* CONFIG_IPX */
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
#include <linux/atalk.h>
#endif

#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
#include <net/dn.h>
#endif

#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
#include <net/irda/irda.h>
#endif

#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
#include <linux/atmdev.h>
#endif

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
/* The AF_UNIX specific socket options */
struct unix_opt {
	struct unix_address	*addr;
	struct dentry		*dentry;
	struct vfsmount		*mnt;
	struct semaphore	readsem;
	struct sock		*other;
	struct sock		**list;
	struct sock		*gc_tree;
	atomic_t		inflight;
	rwlock_t		lock;
	wait_queue_head_t	peer_wait;
};
/* Once the IPX ncpd patches are in these are going into protinfo. */
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
struct ipx_opt {
	ipx_address		dest_addr;
	ipx_interface		*intrfc;
	unsigned short		port;
#ifdef CONFIG_IPX_INTERN
	unsigned char		node[IPX_NODE_LEN];
#endif
	unsigned short		type;
	/*
	 * To handle special ncp connection-handling sockets for mars_nwe,
	 * the connection number must be stored in the socket.
	 */
	unsigned short		ipx_ncp_conn;
};
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct ipv6_pinfo {
	struct in6_addr		saddr;
	struct in6_addr		rcv_saddr;
	struct in6_addr		daddr;
	struct in6_addr		*daddr_cache;

	__u32			flow_label;
	__u32			frag_size;
	int			hop_limit;
	int			mcast_hops;
	int			mcast_oif;

	/* pktoption flags */
	union {
		struct {
			__u8	srcrt:2,
				rxinfo:1,
				rxhlim:1,
				hopopts:1,
				dstopts:1,
				authhdr:1,
				rxflow:1;
		} bits;
		__u8		all;
	} rxopt;

	/* sockopt flags */
	__u8			mc_loop:1,
				recverr:1,
				sndflow:1,
				pmtudisc:2,
				ipv6only:1;

	struct ipv6_mc_socklist	*ipv6_mc_list;
	struct ipv6_fl_socklist	*ipv6_fl_list;
	__u32			dst_cookie;

	struct ipv6_txoptions	*opt;
	struct sk_buff		*pktoptions;
};

struct raw6_opt {
	__u32			checksum;	/* perform checksum */
	__u32			offset;		/* checksum offset */

	struct icmp6_filter	filter;
};

#define __ipv6_only_sock(sk)	((sk)->net_pinfo.af_inet6.ipv6only)
#define ipv6_only_sock(sk)	((sk)->family == PF_INET6 && \
				 (sk)->net_pinfo.af_inet6.ipv6only)
#else
#define __ipv6_only_sock(sk)	0
#define ipv6_only_sock(sk)	0
#endif
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
struct raw_opt {
	struct icmp_filter	filter;
};
#endif
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
struct inet_opt
{
	int			ttl;		/* TTL setting */
	struct ip_options	*opt;
	unsigned char		hdrincl;	/* Include headers ? */
	__u8			mc_ttl;		/* Multicasting TTL */
	__u8			mc_loop;	/* Loopback */
	unsigned		recverr : 1,
				freebind : 1;
	__u16			id;		/* ID counter for DF pkts */
	int			mc_index;	/* Multicast device index */
	struct ip_mc_socklist	*mc_list;	/* Group array */
};
#endif
#if defined(CONFIG_PPPOE) || defined (CONFIG_PPPOE_MODULE)
struct pppoe_opt
{
	struct net_device	*dev;	  /* device associated with socket */
	struct pppoe_addr	pa;	  /* what this socket is bound to */
	struct sockaddr_pppox	relay;	  /* what socket data will be
					     relayed to (PPPoE relaying) */
};

struct pppox_opt
{
	struct ppp_channel	chan;
	struct sock		*sk;
	struct pppox_opt	*next;	  /* for hash table */
	union {
		struct pppoe_opt pppoe;
	} proto;
};
#define pppoe_dev	proto.pppoe.dev
#define pppoe_pa	proto.pppoe.pa
#define pppoe_relay	proto.pppoe.relay
#endif
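
/*
 * Illustrative sketch (not part of the original header): via the accessor
 * macros above, PPPoE code reaches its per-socket state through the
 * protinfo union declared further down in struct sock. pppoe_dev expands
 * to proto.pppoe.dev:
 *
 *	struct pppox_opt *po = sk->protinfo.pppox;
 *	struct net_device *dev = po->pppoe_dev;
 */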
/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
	__u32	start_seq;
	__u32	end_seq;
};

struct tcp_opt {
	int	tcp_header_len;	/* Bytes of tcp header to send		*/
/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
	__u32	pred_flags;
/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...).
 *	See RFC793 and RFC1122. The RFCs write these in capitals.
 */
	__u32	rcv_nxt;	/* What we want to receive next		*/
	__u32	snd_nxt;	/* Next sequence we send		*/

	__u32	snd_una;	/* First byte we want an ack for	*/
	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
	/* Delayed ACK control data */
	struct {
		__u8	pending;	/* ACK is pending			*/
		__u8	quick;		/* Scheduled number of quick acks	*/
		__u8	pingpong;	/* The session is interactive		*/
		__u8	blocked;	/* Delayed ACK was blocked by socket lock */
		__u32	ato;		/* Predicted tick of soft clock		*/
		unsigned long timeout;	/* Currently scheduled timeout		*/
		__u32	lrcvtime;	/* timestamp of last received data packet */
		__u16	last_seg_size;	/* Size of last incoming segment	*/
		__u16	rcv_mss;	/* MSS used for delayed ACK decisions	*/
	} ack;
	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;
		struct task_struct	*task;
		struct iovec		*iov;
		int			memory;
		int			len;
	} ucopy;
	__u32	snd_wl1;	/* Sequence for window update		*/
	__u32	snd_wnd;	/* The window we expect to receive	*/
	__u32	max_window;	/* Maximal window ever seen from peer	*/
	__u32	pmtu_cookie;	/* Last pmtu seen by socket		*/
	__u16	mss_cache;	/* Cached effective mss, not including SACKS */
	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
	__u8	ca_state;	/* State of fast-retransmit machine	*/
	__u8	retransmits;	/* Number of unrecovered RTO timeouts	*/

	__u8	reordering;	/* Packet reordering metric		*/
	__u8	queue_shrunk;	/* Write queue has been shrunk recently	*/
	__u8	defer_accept;	/* User waits for some data after accept() */
	/* RTT measurement */
	__u8	backoff;	/* backoff				*/
	__u32	srtt;		/* smoothed round trip time << 3	*/
	__u32	mdev;		/* mean deviation			*/
	__u32	mdev_max;	/* maximal mdev for the last rtt period	*/
	__u32	rttvar;		/* smoothed mdev_max			*/
	__u32	rtt_seq;	/* sequence number to update rttvar	*/
	__u32	rto;		/* retransmit timeout			*/
	__u32	packets_out;	/* Packets which are "in flight"	*/
	__u32	left_out;	/* Packets which have left the network	*/
	__u32	retrans_out;	/* Retransmitted packets out		*/
/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	__u32	snd_ssthresh;	/* Slow start size threshold		*/
	__u32	snd_cwnd;	/* Sending congestion window		*/
	__u16	snd_cwnd_cnt;	/* Linear increase counter		*/
	__u16	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	__u32	snd_cwnd_used;
	__u32	snd_cwnd_stamp;
	/* Two commonly used timers in both sender and receiver paths. */
	unsigned long		timeout;
	struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
	struct timer_list	delack_timer;		/* Ack delay		*/
	struct sk_buff_head	out_of_order_queue;	/* Out of order segments go here */

	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
	struct sk_buff		*send_head;	/* Front of stuff to transmit	*/
	struct page		*sndmsg_page;	/* Cached page for sendmsg	*/
	u32			sndmsg_off;	/* Cached offset for sendmsg	*/

	__u32	rcv_wnd;	/* Current receiver window		*/
	__u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	__u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	__u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	__u32	copied_seq;	/* Head of yet unread data		*/
/*	Options received (usually on last packet, some only on SYN packets). */
	char	tstamp_ok,	/* TIMESTAMP seen on SYN packet		*/
		wscale_ok,	/* Wscale seen on SYN packet		*/
		sack_ok;	/* SACK seen on SYN packet		*/
	char	saw_tstamp;	/* Saw TIMESTAMP on last packet		*/
	__u8	snd_wscale;	/* Window scaling received from sender	*/
	__u8	rcv_wscale;	/* Window scaling to send to receiver	*/
	__u8	nonagle;	/* Disable Nagle algorithm?		*/
	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
	__u32	rcv_tsval;	/* Time stamp value			*/
	__u32	rcv_tsecr;	/* Time stamp echo reply		*/
	__u32	ts_recent;	/* Time stamp to echo next		*/
	long	ts_recent_stamp; /* Time we stored ts_recent (for aging) */
	__u16	user_mss;	/* mss requested by user in ioctl	*/
	__u8	dsack;		/* D-SACK is scheduled			*/
	__u8	eff_sacks;	/* Size of SACK array to send with next packet */
	struct tcp_sack_block	duplicate_sack[1];	/* D-SACK block		*/
	struct tcp_sack_block	selective_acks[4];	/* The SACKS themselves	*/
	__u32	window_clamp;	/* Maximal window to advertise		*/
	__u32	rcv_ssthresh;	/* Current window clamp			*/
	__u8	probes_out;	/* unanswered 0 window probes		*/
	__u8	num_sacks;	/* Number of SACK blocks		*/
	__u16	advmss;		/* Advertised MSS			*/

	__u8	syn_retries;	/* num of allowed syn retries		*/
	__u8	ecn_flags;	/* ECN status bits			*/
	__u16	prior_ssthresh;	/* ssthresh saved at recovery start	*/
	__u32	lost_out;	/* Lost packets				*/
	__u32	sacked_out;	/* SACK'd packets			*/
	__u32	fackets_out;	/* FACK'd packets			*/
	__u32	high_seq;	/* snd_nxt at onset of congestion	*/
	__u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	__u32	undo_marker;	/* tracking retrans started here	*/
	int	undo_retrans;	/* number of undoable retransmissions	*/
	__u32	urg_seq;	/* Seq of received urgent pointer	*/
	__u16	urg_data;	/* Saved octet of OOB data and control flags */
	__u8	pending;	/* Scheduled timer event		*/
	__u8	urg_mode;	/* In urgent mode			*/
	__u32	snd_up;		/* Urgent pointer			*/
	/* The syn_wait_lock is necessary only to avoid tcp_get_info having
	 * to grab the main sock lock while browsing the listening hash
	 * (otherwise it's deadlock prone).
	 * This lock is acquired in read mode only from tcp_get_info() and
	 * it's acquired in write mode _only_ from code that is actively
	 * changing the syn_wait_queue. All readers that are holding
	 * the master sock lock don't need to grab this lock in read mode
	 * too as the syn_wait_queue writes are always protected by
	 * the main sock lock.
	 */
	rwlock_t		syn_wait_lock;
	struct tcp_listen_opt	*listen_opt;
	/* FIFO of established children */
	struct open_request	*accept_queue;
	struct open_request	*accept_queue_tail;

	int			write_pending;	/* A write to socket waits to start. */

	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
	int			linger2;

	int			frto_counter;	/* Number of new acks after RTO */
	__u32			frto_highmark;	/* snd_nxt when RTO occurred */

	unsigned long		last_synq_overflow;
};
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/*
 * The idea is to start moving to a newer struct gradually
 *
 * IMHO the newer struct should have the following format:
 *
 *	struct sock
 *	{
 *		sockmem [mem, proto, callbacks]
 *		...
 *	}
 *
 * The idea failed because IPv6 transition assumes dual IP/IPv6 sockets.
 * So, net_pinfo really is IPv6-specific, and protinfo unifies all the
 * other private areas.
 */
/* Define this to get the sk->debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
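
/*
 * Usage sketch (illustrative, not from the original file): with
 * SOCK_DEBUGGING defined, and sk->debug set through SO_DEBUG, protocol
 * code can emit per-socket traces that compile away otherwise:
 *
 *	SOCK_DEBUG(sk, "%s: rebuilt header, rc=%d\n", __FUNCTION__, rc);
 */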
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	unsigned int		users;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->lock.slock)); \
	(__sk)->lock.users = 0; \
	init_waitqueue_head(&((__sk)->lock.wq)); \
} while(0)
struct sock {
	/* Socket demultiplex comparisons on incoming packets. */
	__u32			daddr;		/* Foreign IPv4 addr		*/
	__u32			rcv_saddr;	/* Bound local IPv4 addr	*/
	__u16			dport;		/* Destination port		*/
	unsigned short		num;		/* Local port			*/
	int			bound_dev_if;	/* Bound device index if != 0	*/

	/* Main hash linkage for various protocol lookup tables. */
	struct sock		*next;
	struct sock		**pprev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;
	volatile unsigned char	state,		/* Connection state		*/
				zapped;		/* In ax25 & ipx means not linked */
	__u16			sport;		/* Source port			*/

	unsigned short		family;		/* Address family		*/
	unsigned char		reuse;		/* SO_REUSEADDR setting		*/
	unsigned char		shutdown;
	atomic_t		refcnt;		/* Reference count		*/

	socket_lock_t		lock;		/* Synchronizer...		*/
	int			rcvbuf;		/* Size of receive buffer in bytes */

	wait_queue_head_t	*sleep;		/* Sock wait queue		*/
	struct dst_entry	*dst_cache;	/* Destination cache		*/
	rwlock_t		dst_lock;
	atomic_t		rmem_alloc;	/* Receive queue bytes committed */
	struct sk_buff_head	receive_queue;	/* Incoming packets		*/
	atomic_t		wmem_alloc;	/* Transmit queue bytes committed */
	struct sk_buff_head	write_queue;	/* Packet sending queue		*/
	atomic_t		omem_alloc;	/* "o" is "option" or "other"	*/
	int			wmem_queued;	/* Persistent queue size	*/
	int			forward_alloc;	/* Space allocated forward	*/
	__u32			saddr;		/* Sending source		*/
	unsigned int		allocation;	/* Allocation mode		*/
	int			sndbuf;		/* Size of send buffer in bytes	*/
	/* Not all are volatile, but some are, so we might as well say they all are.
	 * XXX Make this a flag word -DaveM
	 */
	volatile char		dead,
				done,
				urginline,
				keepopen,
				linger,
				destroy,
				no_check,
				broadcast,
				bsdism;
	unsigned char		debug;
	unsigned char		rcvtstamp;
	unsigned char		use_write_queue;
	unsigned char		userlocks;
	/* Hole of 3 bytes. Try to pack. */
	unsigned long		lingertime;
	/* The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} backlog;

	rwlock_t		callback_lock;

	/* Error queue, rarely used. */
	struct sk_buff_head	error_queue;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	union {
		struct ipv6_pinfo	af_inet6;
	} net_pinfo;
#endif

	union {
		struct tcp_opt		af_tcp;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct raw_opt		tp_raw4;
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct raw6_opt		tp_raw;
#endif /* CONFIG_IPV6 */
#if defined(CONFIG_SPX) || defined (CONFIG_SPX_MODULE)
		struct spx_opt		af_spx;
#endif /* CONFIG_SPX */
	} tp_pinfo;
	int			err, err_soft;	/* Soft holds errors that don't
						   cause failure but are the cause
						   of a persistent failure not just
						   'timed out' */
	unsigned short		ack_backlog;
	unsigned short		max_ack_backlog;
	__u32			priority;
	unsigned short		type;
	unsigned char		localroute;	/* Route locally only */
	unsigned char		protocol;
	struct ucred		peercred;
	int			rcvlowat;
	long			rcvtimeo;
	long			sndtimeo;
#ifdef CONFIG_FILTER
	/* Socket Filtering Instructions */
	struct sk_filter	*filter;
#endif /* CONFIG_FILTER */
	/* This is where all the private (optional) areas that don't
	 * overlap will eventually live.
	 */
	union {
		void *destruct_hook;
		struct unix_opt		af_unix;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct inet_opt		af_inet;
#endif
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
		struct atalk_sock	af_at;
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
		struct ipx_opt		af_ipx;
#endif
#if defined (CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
		struct dn_scp		dn;
#endif
#if defined (CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
		struct packet_opt	*af_packet;
#endif
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
		x25_cb			*x25;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		ax25_cb			*ax25;
#endif
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		nr_cb			*nr;
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
		rose_cb			*rose;
#endif
#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
		struct pppox_opt	*pppox;
#endif
		struct netlink_opt	*af_netlink;
#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
		struct econet_opt	*af_econet;
#endif
#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
		struct atm_vcc		*af_atm;
#endif
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
		struct irda_sock	*irda;
#endif
#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
		struct wanpipe_opt	*af_wanpipe;
#endif
	} protinfo;
	/* This part is used for the timeout functions. */
	struct timer_list	timer;		/* This is the sock cleanup timer. */
	struct timeval		stamp;

	/* Identd and reporting IO signals */
	struct socket		*socket;

	/* RPC layer private data */
	void			*user_data;

	/* Callbacks */
	void			(*state_change)(struct sock *sk);
	void			(*data_ready)(struct sock *sk, int bytes);
	void			(*write_space)(struct sock *sk);
	void			(*error_report)(struct sock *sk);

	int			(*backlog_rcv)(struct sock *sk,
						struct sk_buff *skb);
	void			(*destruct)(struct sock *sk);
};
/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)			\
do {	if((__sk)->backlog.tail == NULL) {		\
		(__sk)->backlog.head =			\
		     (__sk)->backlog.tail = (__skb);	\
	} else {					\
		((__sk)->backlog.tail)->next = (__skb);	\
		(__sk)->backlog.tail = (__skb);		\
	}						\
	(__skb)->next = NULL;				\
} while(0)
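
/*
 * Receive-path sketch (illustrative, not part of the original header):
 * protocol input runs in BH context; if a process currently owns the
 * socket, the packet must go to the backlog instead of being processed.
 * This is roughly the pattern tcp_v4_rcv() uses, with do_rcv_fn standing
 * in for the protocol's real handler:
 *
 *	bh_lock_sock(sk);
 *	if (!sk->lock.users)
 *		ret = do_rcv_fn(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 *
 * Queued skbs are replayed through sk->backlog_rcv by __release_sock()
 * when the lock owner calls release_sock().
 */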
/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char *optval, int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char *optval,
					int *option);
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   int len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					int len, int noblock, int flags,
					int *addr_len);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	char			name[32];

	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};
/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}
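
/*
 * Sketch (illustrative): protocols typically bump this counter from their
 * hash()/unhash() methods, with BHs already disabled by the caller, so
 * the per-CPU increment needs no further locking:
 *
 *	static void foo_hash(struct sock *sk)
 *	{
 *		... link sk into the lookup table under its lock ...
 *		sock_prot_inc_use(sk->prot);
 *	}
 */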
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8
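
/*
 * These SOCK_*_LOCK bits are or-ed into sk->userlocks. Sketch of the
 * intent (close to what sock_setsockopt() does for SO_SNDBUF): once the
 * user pins a buffer size, later automatic tuning must leave it alone.
 *
 *	sk->userlocks |= SOCK_SNDBUF_LOCK;
 *	sk->sndbuf = max(val * 2, SOCK_MIN_SNDBUF);
 */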
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);
#define lock_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->lock.users != 0) \
		__lock_sock(__sk); \
	(__sk)->lock.users = 1; \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

#define release_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->backlog.tail != NULL) \
		__release_sock(__sk); \
	(__sk)->lock.users = 0; \
	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))
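
/*
 * Typical process-context critical section (sketch): system-call code
 * takes the sleeping lock, while BH handlers may only ever take the
 * spinlock half via bh_lock_sock():
 *
 *	lock_sock(sk);
 *	...examine or update sk state; BHs queue skbs to the backlog...
 *	release_sock(sk);
 *
 * lock_sock() may sleep, so it must not be used from BH or IRQ context.
 */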
extern struct sock *		sk_alloc(int family, int priority, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char *optval,
						int *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_release(struct socket *);
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char *, int *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char *, int);
extern int			sock_no_fcntl(struct socket *,
					      unsigned int, unsigned long);
extern int			sock_no_sendmsg(struct socket *,
						struct msghdr *, int,
						struct scm_cookie *);
extern int			sock_no_recvmsg(struct socket *,
						struct msghdr *, int, int,
						struct scm_cookie *);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);
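
/*
 * Illustrative sketch (names invented): a protocol that does not support
 * some operations points the corresponding proto_ops members at these
 * stubs instead of rolling its own error returns:
 *
 *	struct proto_ops foo_proto_ops = {
 *		family:		PF_FOO,
 *		release:	foo_release,
 *		bind:		foo_bind,
 *		socketpair:	sock_no_socketpair,
 *		mmap:		sock_no_mmap,
 *		sendpage:	sock_no_sendpage,
 *		...
 *	};
 */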
/*
 *	Default socket callbacks and setup code
 */

extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);
#ifdef CONFIG_FILTER

/**
 *	sk_filter - run a packet through a socket filter
 *	@skb: buffer to filter
 *	@filter: filter to apply
 *
 *	Run the filter code and then cut skb->data to correct size returned by
 *	sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 *	than pkt_len we keep whole skb->data. This is the socket level
 *	wrapper to sk_run_filter. It returns 0 if the packet should
 *	be accepted or 1 if the packet should be tossed.
 */

static inline int sk_filter(struct sk_buff *skb, struct sk_filter *filter)
{
	int pkt_len;

	pkt_len = sk_run_filter(skb, filter->insns, filter->len);
	if (!pkt_len)
		return 1;	/* Toss Packet */
	skb_trim(skb, pkt_len);

	return 0;
}
/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
}
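
/*
 * Sketch of how the pair is used (close to the SO_ATTACH_FILTER path in
 * sock_setsockopt()): the new filter is charged to the socket's option
 * memory before being swapped in, and the old one released afterwards:
 *
 *	sk_filter_charge(sk, fp);
 *	oldfp = sk->filter;
 *	sk->filter = fp;
 *	if (oldfp)
 *		sk_filter_release(sk, oldfp);
 */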
#endif /* CONFIG_FILTER */
/*
 * Socket reference counting postulates:
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight, etc.) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets will leak when the socket
 *   is looked up by one CPU while unhashing is done by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are protected as well.
 */
/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, f.e. it is found in hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->refcnt);
}
/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->refcnt);
}
/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->refcnt))
		sk_free(sk);
}
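
/*
 * Typical lookup/use pattern (illustrative sketch, foo_lookup invented):
 * a lookup routine returns the socket with its reference held, and the
 * caller drops it when finished; the final sock_put() frees the socket:
 *
 *	sk = foo_lookup(saddr, sport, daddr, dport);
 *	if (sk != NULL) {
 *		...use sk...
 *		sock_put(sk);
 *	}
 */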
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->callback_lock);
	sk->dead = 1;
	sk->socket = NULL;
	sk->sleep = NULL;
	write_unlock_bh(&sk->callback_lock);
}
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->callback_lock);
	sk->sleep = &parent->wait;
	parent->sk = sk;
	sk->socket = parent;
	write_unlock_bh(&sk->callback_lock);
}
static inline int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->callback_lock);
	uid = sk->socket ? sk->socket->inode->i_uid : 0;
	read_unlock(&sk->callback_lock);
	return uid;
}
static inline unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->callback_lock);
	ino = sk->socket ? sk->socket->inode->i_ino : 0;
	read_unlock(&sk->callback_lock);
	return ino;
}
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->dst_cache;
}
static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->dst_lock);
	dst = sk->dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->dst_lock);
	return dst;
}
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = dst;
	dst_release(old_dst);
}
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->dst_lock);
}
static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = NULL;
	dst_release(old_dst);
}
static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->dst_lock);
}
static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->dst_cache = NULL;
		return NULL;
	}

	return dst;
}
static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
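
/*
 * Output-path sketch (illustrative): revalidate the cached route and fall
 * back to a fresh lookup when it has been obsoleted, roughly what TCP's
 * header-rebuild path does. The rt/ip_route_output() names assume an
 * IPv4 user:
 *
 *	dst = __sk_dst_check(sk, 0);
 *	if (dst == NULL) {
 *		...look up a new route, e.g. via ip_route_output(&rt, ...)...
 *		__sk_dst_set(sk, &rt->u.dst);
 *	}
 */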
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->wmem_alloc);
}
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
}
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		int err = 0;
		struct sk_filter *filter;

		/* It would deadlock if sock_queue_rcv_skb were used
		   with the socket lock held! We assume that users of this
		   function are lock free.
		 */
		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL && sk_filter(skb, filter))
			err = -EPERM;
		bh_unlock_sock(sk);
		if (err)
			return err;	/* Toss packet */
	}
#endif /* CONFIG_FILTER */

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
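
/*
 * Illustrative sketch: datagram protocols deliver with this helper and
 * drop the packet themselves on failure, in essence what the UDP input
 * path does:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return -1;
 *	}
 *	return 0;
 */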
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->error_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}
/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->err, 0);
	return -err;
}
static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->shutdown & SEND_SHUTDOWN)) {
		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->socket && sk->socket->fasync_list)
		sock_wake_async(sk->socket, how, band);
}
#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256
/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(struct sock *sk)
{
	return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
}
static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
static inline long sock_rcvtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->rcvtimeo;
}

static inline long sock_sndtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sndtimeo;
}

static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
}
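
/*
 * Sketch of how a recvmsg() implementation ties these together
 * (illustrative):
 *
 *	long timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	int  target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 *
 *	...block for up to timeo until at least target bytes are queued,
 *	   returning sock_intr_errno(timeo) if a signal interrupts...
 */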
/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	if (sk->rcvtstamp)
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
	else
		sk->stamp = skb->stamp;
}
/*
 *	Enable debug/info messages
 */

#if 0	/* flip to non-zero to compile the messages out */
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif
/*
 *	Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sleep, &wait); \
				lock_sock(sk); \
				}
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#endif /* _SOCK_H */