2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * The Internet Protocol (IP) output module.
8 * Version: $Id: ip_output.c,v 1.1.1.1 2005/04/11 02:51:13 jack Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org>
13 * Alan Cox, <Alan.Cox@linux.org>
15 * Stefan Becker, <stefanb@yello.ping.de>
16 * Jorge Cwik, <jorge@laser.satlink.net>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
19 * See ip_input.c for original log
22 * Alan Cox : Missing nonblock feature in ip_build_xmit.
23 * Mike Kilburn : htons() missing in ip_build_xmit.
24 * Bradford Johnson: Fix faulty handling of some frames when
26 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
27 * (in case if packet not accepted by
28 * output firewall rules)
29 * Mike McLagan : Routing by source
30 * Alexey Kuznetsov: use new route cache
31 * Andi Kleen: Fix broken PMTU recovery and remove
32 * some redundant tests.
33 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
34 * Andi Kleen : Replace ip_reply with ip_send_reply.
35 * Andi Kleen : Split fast and slow ip_build_xmit path
36 * for decreased register pressure on x86
37 * and more readability.
38 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
39 * silently drop skb instead of failing with -EPERM.
40 * Detlev Wengorz : Copy protocol for fragments.
43 #include <asm/uaccess.h>
44 #include <asm/system.h>
45 #include <linux/types.h>
46 #include <linux/kernel.h>
47 #include <linux/sched.h>
49 #include <linux/string.h>
50 #include <linux/errno.h>
51 #include <linux/config.h>
53 #include <linux/socket.h>
54 #include <linux/sockios.h>
56 #include <linux/inet.h>
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/proc_fs.h>
60 #include <linux/stat.h>
61 #include <linux/init.h>
65 #include <net/protocol.h>
66 #include <net/route.h>
69 #include <linux/skbuff.h>
74 #include <net/checksum.h>
75 #include <net/inetpeer.h>
76 #include <linux/igmp.h>
77 #include <linux/netfilter_ipv4.h>
78 #include <linux/mroute.h>
79 #include <linux/netlink.h>
82 * Shall we try to damage output packets if routing dev changes?
85 int sysctl_ip_dynaddr = 0;
86 int sysctl_ip_default_ttl = IPDEFTTL;
88 /* Generate a checksum for an outgoing IP datagram. */
89 __inline__ void ip_send_check(struct iphdr *iph)
92 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
95 /* dev_loopback_xmit for use with netfilter. */
96 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
98 newskb->mac.raw = newskb->data;
99 __skb_pull(newskb, newskb->nh.raw - newskb->data);
100 newskb->pkt_type = PACKET_LOOPBACK;
101 newskb->ip_summed = CHECKSUM_UNNECESSARY;
102 BUG_TRAP(newskb->dst);
104 #ifdef CONFIG_NETFILTER_DEBUG
105 nf_debug_ip_loopback_xmit(newskb);
111 /* Don't just hand NF_HOOK skb->dst->output, in case netfilter hook
114 output_maybe_reroute(struct sk_buff *skb)
116 return skb->dst->output(skb);
120 * Add an ip header to a skbuff and send it out.
122 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
123 u32 saddr, u32 daddr, struct ip_options *opt)
125 struct rtable *rt = (struct rtable *)skb->dst;
128 /* Build the IP header. */
130 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
132 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
136 iph->tos = sk->protinfo.af_inet.tos;
137 if (ip_dont_fragment(sk, &rt->u.dst))
138 iph->frag_off = htons(IP_DF);
141 iph->ttl = sk->protinfo.af_inet.ttl;
142 iph->daddr = rt->rt_dst;
143 iph->saddr = rt->rt_src;
144 iph->protocol = sk->protocol;
145 iph->tot_len = htons(skb->len);
146 ip_select_ident(iph, &rt->u.dst, sk);
149 if (opt && opt->optlen) {
150 iph->ihl += opt->optlen>>2;
151 ip_options_build(skb, opt, daddr, rt, 0);
156 return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
157 output_maybe_reroute);
160 static inline int ip_finish_output2(struct sk_buff *skb)
162 struct dst_entry *dst = skb->dst;
163 struct hh_cache *hh = dst->hh;
165 #ifdef CONFIG_NETFILTER_DEBUG
166 nf_debug_ip_finish_output2(skb);
167 #endif /*CONFIG_NETFILTER_DEBUG*/
170 read_lock_bh(&hh->hh_lock);
171 memcpy(skb->data - 16, hh->hh_data, 16);
172 read_unlock_bh(&hh->hh_lock);
173 skb_push(skb, hh->hh_len);
174 return hh->hh_output(skb);
175 } else if (dst->neighbour)
176 return dst->neighbour->output(skb);
179 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
184 __inline__ int ip_finish_output(struct sk_buff *skb)
186 struct net_device *dev = skb->dst->dev;
189 skb->protocol = htons(ETH_P_IP);
191 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
195 int ip_mc_output(struct sk_buff *skb)
197 struct sock *sk = skb->sk;
198 struct rtable *rt = (struct rtable*)skb->dst;
199 struct net_device *dev = rt->u.dst.dev;
202 * If the indicated interface is up and running, send the packet.
204 IP_INC_STATS(IpOutRequests);
205 #ifdef CONFIG_IP_ROUTE_NAT
206 if (rt->rt_flags & RTCF_NAT)
211 skb->protocol = htons(ETH_P_IP);
214 * Multicasts are looped back for other local users
217 if (rt->rt_flags&RTCF_MULTICAST) {
218 if ((!sk || sk->protinfo.af_inet.mc_loop)
219 #ifdef CONFIG_IP_MROUTE
220 /* Small optimization: do not loopback not local frames,
221 which returned after forwarding; they will be dropped
222 by ip_mr_input in any case.
223 Note, that local frames are looped back to be delivered
226 This check is duplicated in ip_mr_input at the moment.
228 && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
231 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
233 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
235 ip_dev_loopback_xmit);
238 /* Multicasts with ttl 0 must not go beyond the host */
240 if (skb->nh.iph->ttl == 0) {
246 if (rt->rt_flags&RTCF_BROADCAST) {
247 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
249 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
250 newskb->dev, ip_dev_loopback_xmit);
253 return ip_finish_output(skb);
256 int ip_output(struct sk_buff *skb)
258 #ifdef CONFIG_IP_ROUTE_NAT
259 struct rtable *rt = (struct rtable*)skb->dst;
262 IP_INC_STATS(IpOutRequests);
264 #ifdef CONFIG_IP_ROUTE_NAT
265 if (rt->rt_flags&RTCF_NAT)
269 return ip_finish_output(skb);
272 /* Queues a packet to be sent, and starts the transmitter if necessary.
273 * This routine also needs to put in the total length and compute the
274 * checksum. We used to do this in two stages, ip_build_header() then
275 * this, but that scheme created a mess when routes disappeared etc.
276 * So we do it all here, and the TCP send engine has been changed to
277 * match. (No more unroutable FIN disasters, etc. wheee...) This will
278 * most likely make other reliable transport layers above IP easier
279 * to implement under Linux.
281 static inline int ip_queue_xmit2(struct sk_buff *skb)
283 struct sock *sk = skb->sk;
284 struct rtable *rt = (struct rtable *)skb->dst;
285 struct net_device *dev;
286 struct iphdr *iph = skb->nh.iph;
290 /* This can happen when the transport layer has segments queued
291 * with a cached route, and by the time we get here things are
292 * re-routed to a device with a different MTU than the original
293 * device. Sick, but we must cover it.
295 if (skb_headroom(skb) < dev->hard_header_len && dev->hard_header) {
296 struct sk_buff *skb2;
298 skb2 = skb_realloc_headroom(skb, (dev->hard_header_len + 15) & ~15);
303 skb_set_owner_w(skb2, sk);
308 if (skb->len > rt->u.dst.pmtu)
311 ip_select_ident(iph, &rt->u.dst, sk);
313 /* Add an IP checksum. */
316 skb->priority = sk->priority;
317 return skb->dst->output(skb);
320 if (ip_dont_fragment(sk, &rt->u.dst)) {
321 /* Reject packet ONLY if TCP might fragment
322 * it itself, if were careful enough.
324 NETDEBUG(printk(KERN_DEBUG "sending pkt_too_big (len[%u] pmtu[%u]) to self\n",
325 skb->len, rt->u.dst.pmtu));
327 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
328 htonl(rt->u.dst.pmtu));
332 ip_select_ident(iph, &rt->u.dst, sk);
333 if (skb->ip_summed == CHECKSUM_HW &&
334 (skb = skb_checksum_help(skb)) == NULL)
336 return ip_fragment(skb, skb->dst->output);
339 int ip_queue_xmit(struct sk_buff *skb)
341 struct sock *sk = skb->sk;
342 struct ip_options *opt = sk->protinfo.af_inet.opt;
346 /* Skip all of this if the packet is already routed,
347 * f.e. by something like SCTP.
349 rt = (struct rtable *) skb->dst;
353 /* Make sure we can route this packet. */
354 rt = (struct rtable *)__sk_dst_check(sk, 0);
358 /* Use correct destination address if we have options. */
363 /* If this fails, retransmit mechanism of transport layer will
364 * keep trying until route appears or the connection times itself
367 if (ip_route_output(&rt, daddr, sk->saddr,
371 __sk_dst_set(sk, &rt->u.dst);
372 sk->route_caps = rt->u.dst.dev->features;
374 skb->dst = dst_clone(&rt->u.dst);
377 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
380 /* OK, we know where to send it, allocate and build IP header. */
381 iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
382 *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (sk->protinfo.af_inet.tos & 0xff));
383 iph->tot_len = htons(skb->len);
384 if (ip_dont_fragment(sk, &rt->u.dst))
385 iph->frag_off = htons(IP_DF);
388 iph->ttl = sk->protinfo.af_inet.ttl;
389 iph->protocol = sk->protocol;
390 iph->saddr = rt->rt_src;
391 iph->daddr = rt->rt_dst;
393 /* Transport layer set skb->h.foo itself. */
395 if(opt && opt->optlen) {
396 iph->ihl += opt->optlen >> 2;
397 ip_options_build(skb, opt, sk->daddr, rt, 0);
400 return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
404 IP_INC_STATS(IpOutNoRoutes);
406 return -EHOSTUNREACH;
410 * Build and send a packet, with as little as one copy
412 * Doesn't care much about ip options... option length can be
413 * different for fragment at 0 and other fragments.
415 * Note that the fragment at the highest offset is sent first,
416 * so the getfrag routine can fill in the TCP/UDP checksum header
417 * field in the last fragment it sends... actually it also helps
418 * the reassemblers, they can put most packets in at the head of
419 * the fragment queue, and they know the total size in advance. This
420 * last feature will measurably improve the Linux fragment handler one
423 * The callback has five args, an arbitrary pointer (copy of frag),
424 * the source IP address (may depend on the routing table), the
425 * destination address (char *), the offset to copy from, and the
426 * length to be copied.
429 static int ip_build_xmit_slow(struct sock *sk,
430 int getfrag (const void *,
436 struct ipcm_cookie *ipc,
440 unsigned int fraglen, maxfraglen, fragheaderlen;
446 int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
448 struct ip_options *opt = ipc->opt;
451 mtu = rt->u.dst.pmtu;
452 if (ip_dont_fragment(sk, &rt->u.dst))
455 length -= sizeof(struct iphdr);
458 fragheaderlen = sizeof(struct iphdr) + opt->optlen;
459 maxfraglen = ((mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
461 fragheaderlen = sizeof(struct iphdr);
464 * Fragheaderlen is the size of 'overhead' on each buffer. Now work
465 * out the size of the frames to send.
468 maxfraglen = ((mtu-sizeof(struct iphdr)) & ~7) + fragheaderlen;
471 if (length + fragheaderlen > 0xFFFF) {
472 ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
477 * Start at the end of the frame by handling the remainder.
480 offset = length - (length % (maxfraglen - fragheaderlen));
483 * Amount of memory to allocate for final fragment.
486 fraglen = length - offset + fragheaderlen;
488 if (length-offset==0) {
489 fraglen = maxfraglen;
490 offset -= maxfraglen-fragheaderlen;
494 * The last fragment will not have MF (more fragments) set.
500 * Don't fragment packets for path mtu discovery.
503 if (offset > 0 && sk->protinfo.af_inet.pmtudisc==IP_PMTUDISC_DO) {
504 ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
511 * Begin outputting the bytes.
514 id = sk->protinfo.af_inet.id++;
518 struct sk_buff * skb;
521 * Get the memory we require with some space left for alignment.
523 if (!(flags & MSG_DONTWAIT) || nfrags == 0) {
524 skb = sock_alloc_send_skb(sk, fraglen + hh_len + 15,
525 (flags & MSG_DONTWAIT), &err);
527 /* On a non-blocking write, we check for send buffer
528 * usage on the first fragment only.
530 skb = sock_wmalloc(sk, fraglen + hh_len + 15, 1,
539 * Fill in the control structures
542 skb->priority = sk->priority;
543 skb->dst = dst_clone(&rt->u.dst);
544 skb_reserve(skb, hh_len);
547 * Find where to start putting bytes.
550 data = skb_put(skb, fraglen);
551 skb->nh.iph = (struct iphdr *)data;
554 * Only write IP header onto non-raw packets
558 struct iphdr *iph = (struct iphdr *)data;
563 iph->ihl += opt->optlen>>2;
564 ip_options_build(skb, opt,
565 ipc->addr, rt, offset);
567 iph->tos = sk->protinfo.af_inet.tos;
568 iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
569 iph->frag_off = htons(offset>>3)|mf|df;
573 /* Select an unpredictable ident only
574 * for packets without DF or having
577 __ip_select_ident(iph, &rt->u.dst);
582 * Any further fragments will have MF set.
586 if (rt->rt_type == RTN_MULTICAST)
587 iph->ttl = sk->protinfo.af_inet.mc_ttl;
589 iph->ttl = sk->protinfo.af_inet.ttl;
590 iph->protocol = sk->protocol;
592 iph->saddr = rt->rt_src;
593 iph->daddr = rt->rt_dst;
594 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
602 if (getfrag(frag, data, offset, fraglen-fragheaderlen)) {
608 offset -= (maxfraglen-fragheaderlen);
609 fraglen = maxfraglen;
613 err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
614 skb->dst->dev, output_maybe_reroute);
617 err = sk->protinfo.af_inet.recverr ? net_xmit_errno(err) : 0;
621 } while (offset >= 0);
624 ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
629 IP_INC_STATS(IpOutDiscards);
631 ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
636 * Fast path for unfragmented packets.
638 int ip_build_xmit(struct sock *sk,
639 int getfrag (const void *,
645 struct ipcm_cookie *ipc,
655 * Try the simple case first. This leaves fragmented frames, and by
656 * choice RAW frames within 20 bytes of maximum size(rare) to the long path
659 if (!sk->protinfo.af_inet.hdrincl) {
660 length += sizeof(struct iphdr);
663 * Check for slow path.
665 if (length > rt->u.dst.pmtu || ipc->opt != NULL)
666 return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
668 if (length > rt->u.dst.dev->mtu) {
669 ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
677 * Do path mtu discovery if needed.
680 if (ip_dont_fragment(sk, &rt->u.dst))
684 * Fast path for unfragmented frames without options.
687 int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
689 skb = sock_alloc_send_skb(sk, length+hh_len+15,
690 flags&MSG_DONTWAIT, &err);
693 skb_reserve(skb, hh_len);
696 skb->priority = sk->priority;
697 skb->dst = dst_clone(&rt->u.dst);
699 skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
701 if(!sk->protinfo.af_inet.hdrincl) {
704 iph->tos=sk->protinfo.af_inet.tos;
705 iph->tot_len = htons(length);
707 iph->ttl=sk->protinfo.af_inet.mc_ttl;
708 ip_select_ident(iph, &rt->u.dst, sk);
709 if (rt->rt_type != RTN_MULTICAST)
710 iph->ttl=sk->protinfo.af_inet.ttl;
711 iph->protocol=sk->protocol;
712 iph->saddr=rt->rt_src;
713 iph->daddr=rt->rt_dst;
715 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
716 err = getfrag(frag, ((char *)iph)+iph->ihl*4,0, length-iph->ihl*4);
719 err = getfrag(frag, (void *)iph, 0, length);
724 err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
725 output_maybe_reroute);
727 err = sk->protinfo.af_inet.recverr ? net_xmit_errno(err) : 0;
737 IP_INC_STATS(IpOutDiscards);
742 * This IP datagram is too large to be sent in one piece. Break it up into
743 * smaller pieces (each of size equal to IP header plus
744 * a block of the data of the original IP data part) that will yet fit in a
745 * single device frame, and queue such a frame for sending.
747 * Yes this is inefficient, feel free to submit a quicker one.
750 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
755 struct net_device *dev;
756 struct sk_buff *skb2;
757 unsigned int mtu, hlen, left, len;
760 struct rtable *rt = (struct rtable*)skb->dst;
766 * Point into the IP datagram header.
772 * Setup starting values.
776 left = skb->len - hlen; /* Space per frame */
777 mtu = rt->u.dst.pmtu - hlen; /* Size of data space */
778 ptr = raw + hlen; /* Where to start from */
781 * Fragment the datagram.
784 offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
785 not_last_frag = iph->frag_off & htons(IP_MF);
788 * Keep copying data until we run out.
793 /* IF: it doesn't fit, use 'mtu' - the data space left */
796 /* IF: we are not sending upto and including the packet end
797 then align the next start on an eight byte boundary */
805 if ((skb2 = alloc_skb(len+hlen+dev->hard_header_len+15,GFP_ATOMIC)) == NULL) {
806 NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
812 * Set up data on packet
815 skb2->pkt_type = skb->pkt_type;
816 skb2->priority = skb->priority;
817 skb_reserve(skb2, (dev->hard_header_len+15)&~15);
818 skb_put(skb2, len + hlen);
819 skb2->nh.raw = skb2->data;
820 skb2->h.raw = skb2->data + hlen;
821 skb2->protocol = skb->protocol;
822 skb2->security = skb->security;
825 * Charge the memory for the fragment to any owner
830 skb_set_owner_w(skb2, skb->sk);
831 skb2->dst = dst_clone(skb->dst);
832 skb2->dev = skb->dev;
835 * Copy the packet header into the new buffer.
838 memcpy(skb2->nh.raw, skb->data, hlen);
841 * Copy a block of the IP datagram.
843 if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
848 * Fill in the new header fields.
851 iph->frag_off = htons((offset >> 3));
853 /* ANK: dirty, but effective trick. Upgrade options only if
854 * the segment to be fragmented was THE FIRST (otherwise,
855 * options are already fixed) and make it ONCE
856 * on the initial skb, so that all the following fragments
857 * will inherit fixed options.
860 ip_options_fragment(skb);
862 /* Copy the flags to each fragment. */
863 IPCB(skb2)->flags = IPCB(skb)->flags;
866 * Added AC : If we are fragmenting a fragment that's not the
867 * last fragment then keep MF on each bit
869 if (left > 0 || not_last_frag)
870 iph->frag_off |= htons(IP_MF);
874 #ifdef CONFIG_NET_SCHED
875 skb2->tc_index = skb->tc_index;
877 #ifdef CONFIG_NETFILTER
878 skb2->nfmark = skb->nfmark;
879 /* Connection association is same as pre-frag packet */
880 skb2->nfct = skb->nfct;
881 nf_conntrack_get(skb2->nfct);
882 #ifdef CONFIG_NETFILTER_DEBUG
883 skb2->nf_debug = skb->nf_debug;
888 * Put this fragment into the sending queue.
891 IP_INC_STATS(IpFragCreates);
893 iph->tot_len = htons(len + hlen);
902 IP_INC_STATS(IpFragOKs);
907 IP_INC_STATS(IpFragFails);
912 * Fetch data from kernel space and fill in checksum if needed.
914 static int ip_reply_glue_bits(const void *dptr, char *to, unsigned int offset,
915 unsigned int fraglen)
917 struct ip_reply_arg *dp = (struct ip_reply_arg*)dptr;
918 u16 *pktp = (u16 *)to;
924 if (offset >= iov->iov_len) {
925 offset -= iov->iov_len;
929 len = iov->iov_len - offset;
930 if (fraglen > len) { /* overlapping. */
931 dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, len,
939 dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, fraglen,
942 if (hdrflag && dp->csumoffset)
943 *(pktp + dp->csumoffset) = csum_fold(dp->csum); /* fill in checksum */
948 * Generic function to send a packet as reply to another packet.
949 * Used to send TCP resets so far. ICMP should use this function too.
951 * Should run single threaded per socket because it uses the sock
952 * structure to pass arguments.
954 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
958 struct ip_options opt;
961 struct ipcm_cookie ipc;
963 struct rtable *rt = (struct rtable*)skb->dst;
965 if (ip_options_echo(&replyopts.opt, skb))
968 daddr = ipc.addr = rt->rt_src;
971 if (replyopts.opt.optlen) {
972 ipc.opt = &replyopts.opt;
975 daddr = replyopts.opt.faddr;
978 if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
981 /* And let IP do all the hard work.
983 This chunk is not reenterable, hence spinlock.
984 Note that it uses the fact, that this function is called
985 with locally disabled BH and that sk cannot be already spinlocked.
988 sk->protinfo.af_inet.tos = skb->nh.iph->tos;
989 sk->priority = skb->priority;
990 sk->protocol = skb->nh.iph->protocol;
991 ip_build_xmit(sk, ip_reply_glue_bits, arg, len, &ipc, rt, MSG_DONTWAIT);
998 * IP protocol layer initialiser
1001 static struct packet_type ip_packet_type =
1003 __constant_htons(ETH_P_IP),
1004 NULL, /* All devices */
1011 * IP registers the packet type and then calls the subprotocol initialisers
1014 void __init ip_init(void)
1016 dev_add_pack(&ip_packet_type);
1021 #ifdef CONFIG_IP_MULTICAST
1022 proc_net_create("igmp", 0, ip_mc_procinfo);