3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/ipv4/udp.c
10 * $Id: udp.c,v 1.1.1.1 2005/04/11 02:51:13 jack Exp $
13 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
15 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
16 * a single port at the same time.
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
24 #include <linux/config.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/socket.h>
28 #include <linux/sockios.h>
29 #include <linux/sched.h>
30 #include <linux/net.h>
31 #include <linux/in6.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_arp.h>
34 #include <linux/ipv6.h>
35 #include <linux/icmpv6.h>
36 #include <linux/init.h>
37 #include <asm/uaccess.h>
43 #include <net/ndisc.h>
44 #include <net/protocol.h>
45 #include <net/transp_v6.h>
46 #include <net/ip6_route.h>
47 #include <net/addrconf.h>
50 #include <net/inet_common.h>
52 #include <net/checksum.h>
/* UDPv6 MIB counters; sized NR_CPUS*2 — presumably one BH-context and one
 * user-context slot per CPU (see UDP6_INC_STATS_BH/_USER uses below) —
 * TODO(review): confirm layout against the UDP6_INC_STATS* macro definitions. */
54 struct udp_mib udp_stats_in6[NR_CPUS*2];
56 /* Grrr, addr_type already calculated by caller, but I don't want
57 * to add some silly "cookie" argument to this method just for that.
/*
 * udp_v6_get_port - bind @sk to local UDP port @snum under udp_hash_lock.
 *
 * If @snum is 0 an ephemeral port is chosen: the rover scans UDP_HTABLE_SIZE
 * consecutive ports, remembering the hash chain with the fewest sockets,
 * then probes ports along that chain until a free one is found.  If @snum
 * is non-zero, the chain for that port is walked and the bind fails when a
 * conflicting socket exists (same port, compatible device binding, and
 * overlapping address per the v4-mapped / v6-only rules below), unless both
 * sockets set SO_REUSEADDR.  On success the socket is linked into udp_hash.
 *
 * NOTE(review): this extract elides some lines (the embedded original line
 * numbers are non-contiguous) — the do/while head, break/goto statements and
 * several closing braces are missing; verify against the full source.
 */
59 static int udp_v6_get_port(struct sock *sk, unsigned short snum)
61 write_lock_bh(&udp_hash_lock);
63 int best_size_so_far, best, result, i;
/* Reset the rover when it has wandered outside the configured local range. */
65 if (udp_port_rover > sysctl_local_port_range[1] ||
66 udp_port_rover < sysctl_local_port_range[0])
67 udp_port_rover = sysctl_local_port_range[0];
68 best_size_so_far = 32767;
69 best = result = udp_port_rover;
/* Pass 1: sample one port per hash bucket, tracking the shortest chain. */
70 for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
74 sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
/* Wrap back into the local port range, staying on the same hash chain. */
76 if (result > sysctl_local_port_range[1])
77 result = sysctl_local_port_range[0] +
78 ((result - sysctl_local_port_range[0]) &
79 (UDP_HTABLE_SIZE - 1));
84 if (++size >= best_size_so_far)
86 } while ((sk = sk->next) != NULL);
87 best_size_so_far = size;
/* Pass 2: walk the chosen (shortest) chain's ports until one is unused. */
92 for(;; result += UDP_HTABLE_SIZE) {
93 if (result > sysctl_local_port_range[1])
94 result = sysctl_local_port_range[0]
95 + ((result - sysctl_local_port_range[0]) &
96 (UDP_HTABLE_SIZE - 1));
97 if (!udp_lport_inuse(result))
101 udp_port_rover = snum = result;
/* Explicit bind path: check the requested port's chain for conflicts. */
104 int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);
106 for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
/* A conflict exists when ports and device bindings match and the two
 * sockets' local addresses overlap, taking IPV6_V6ONLY and v4-mapped
 * addresses into account (an IPv4 socket and a v6-only socket never
 * conflict).  Both sockets having SO_REUSEADDR waives the conflict. */
109 if (sk2->num == snum &&
111 sk2->bound_dev_if == sk->bound_dev_if &&
112 ((!sk2->rcv_saddr && !ipv6_only_sock(sk)) ||
113 (sk2->family == AF_INET6 &&
114 ipv6_addr_any(&sk2->net_pinfo.af_inet6.rcv_saddr) &&
115 !(ipv6_only_sock(sk2) && addr_type == IPV6_ADDR_MAPPED)) ||
116 (addr_type == IPV6_ADDR_ANY &&
117 (!ipv6_only_sock(sk) ||
118 !(sk2->family == AF_INET6 ? (ipv6_addr_type(&sk2->net_pinfo.af_inet6.rcv_saddr) == IPV6_ADDR_MAPPED) : 1))) ||
119 (sk2->family == AF_INET6 &&
120 !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
121 &sk2->net_pinfo.af_inet6.rcv_saddr)) ||
122 (addr_type == IPV6_ADDR_MAPPED &&
123 !ipv6_only_sock(sk2) &&
126 sk->rcv_saddr == sk2->rcv_saddr))) &&
127 (!sk2->reuse || !sk->reuse))
/* Success: link the socket at the head of its hash chain if not hashed. */
133 if (sk->pprev == NULL) {
134 struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
135 if ((sk->next = *skp) != NULL)
136 (*skp)->pprev = &sk->next;
139 sock_prot_inc_use(sk->prot);
142 write_unlock_bh(&udp_hash_lock);
/* Failure path: drop the lock and (presumably) return an error. */
146 write_unlock_bh(&udp_hash_lock);
/* udp_v6_hash - hash callback for udpv6_prot; body elided in this extract
 * (for UDP the real work happens in udp_v6_get_port — verify in full source). */
150 static void udp_v6_hash(struct sock *sk)
/*
 * udp_v6_unhash - remove @sk from the UDP hash table.
 * Unlinks the socket from its chain and drops the protocol use count,
 * all under udp_hash_lock (BH-safe write lock).
 */
155 static void udp_v6_unhash(struct sock *sk)
157 write_lock_bh(&udp_hash_lock);
/* Standard doubly-linked (next/pprev) unlink. */
160 sk->next->pprev = sk->pprev;
161 *sk->pprev = sk->next;
164 sock_prot_dec_use(sk->prot);
167 write_unlock_bh(&udp_hash_lock);
/*
 * udp_v6_lookup - find the best-matching UDPv6 socket for a received datagram.
 * @saddr/@sport: remote address/port of the packet (network byte order port).
 * @daddr/@dport: local address/port the packet was sent to.
 * @dif: receiving device ifindex, for SO_BINDTODEVICE matching.
 *
 * Walks the hash chain for the destination port and scores each candidate:
 * wildcard fields match anything, exact matches score higher (scoring lines
 * are elided in this extract); the highest-scoring socket wins.
 * Returns the matched socket, or NULL.  NOTE(review): whether a reference
 * is taken on the returned socket is not visible here — verify.
 */
170 static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
171 struct in6_addr *daddr, u16 dport, int dif)
173 struct sock *sk, *result = NULL;
174 unsigned short hnum = ntohs(dport);
177 read_lock(&udp_hash_lock);
178 for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
179 if((sk->num == hnum) &&
180 (sk->family == PF_INET6)) {
181 struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
/* Connected socket: remote port must match exactly. */
184 if(sk->dport != sport)
/* Bound local address must equal the packet's destination. */
188 if(!ipv6_addr_any(&np->rcv_saddr)) {
189 if(ipv6_addr_cmp(&np->rcv_saddr, daddr))
/* Connected remote address must equal the packet's source. */
193 if(!ipv6_addr_any(&np->daddr)) {
194 if(ipv6_addr_cmp(&np->daddr, saddr))
/* Device-bound socket only matches packets from that device. */
198 if(sk->bound_dev_if) {
199 if(sk->bound_dev_if != dif)
/* Keep the candidate with the best score so far. */
206 } else if(score > badness) {
214 read_unlock(&udp_hash_lock);
/*
 * udpv6_connect - connect a UDPv6 socket to @uaddr.
 *
 * Handles AF_INET and v4-mapped destinations by delegating to udp_connect()
 * and recording the mapped addresses in the IPv6 socket state.  For native
 * IPv6 destinations: resolves an optional flow label, requires an interface
 * for link-local destinations, routes to the destination (honouring any
 * source route in the flow label or socket options), stores the dst cache
 * entry, picks a source address, and marks the socket TCP_ESTABLISHED.
 *
 * Returns 0 on success or a negative errno.  NOTE(review): several error
 * returns and closing braces are elided in this extract — the embedded
 * original line numbers are non-contiguous; verify against full source.
 */
222 int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
224 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
225 struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
226 struct in6_addr *daddr;
227 struct in6_addr saddr;
228 struct dst_entry *dst;
230 struct ip6_flowlabel *flowlabel = NULL;
/* Plain IPv4 sockaddr: reject on v6-only sockets, else use the v4 path. */
234 if (usin->sin6_family == AF_INET) {
235 if (__ipv6_only_sock(sk))
236 return -EAFNOSUPPORT;
237 err = udp_connect(sk, uaddr, addr_len);
241 if (addr_len < SIN6_LEN_RFC2133)
244 if (usin->sin6_family != AF_INET6)
245 return -EAFNOSUPPORT;
/* Resolve a flow label the caller attached via sin6_flowinfo, if any;
 * a labelled destination address overrides the one in the sockaddr. */
247 fl.fl6_flowlabel = 0;
249 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
250 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
251 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
252 if (flowlabel == NULL)
254 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
258 addr_type = ipv6_addr_type(&usin->sin6_addr);
/* connect(::) is treated as connect to the loopback address (::1). */
260 if (addr_type == IPV6_ADDR_ANY) {
264 usin->sin6_addr.s6_addr[15] = 0x01;
267 daddr = &usin->sin6_addr;
/* V4-mapped destination: connect via IPv4, then mirror the addresses
 * into the IPv6 fields as ::ffff:a.b.c.d. */
269 if (addr_type == IPV6_ADDR_MAPPED) {
270 struct sockaddr_in sin;
272 if (__ipv6_only_sock(sk))
275 sin.sin_family = AF_INET;
276 sin.sin_addr.s_addr = daddr->s6_addr32[3];
277 sin.sin_port = usin->sin6_port;
279 err = udp_connect(sk, (struct sockaddr*) &sin, sizeof(sin));
285 ipv6_addr_set(&np->daddr, 0, 0,
289 if(ipv6_addr_any(&np->saddr)) {
290 ipv6_addr_set(&np->saddr, 0, 0,
295 if(ipv6_addr_any(&np->rcv_saddr)) {
296 ipv6_addr_set(&np->rcv_saddr, 0, 0,
/* Link-local destination: a scope id (or prior device bind) is required
 * and must not contradict an existing SO_BINDTODEVICE binding. */
303 if (addr_type&IPV6_ADDR_LINKLOCAL) {
304 if (addr_len >= sizeof(struct sockaddr_in6) &&
305 usin->sin6_scope_id) {
306 if (sk->bound_dev_if && sk->bound_dev_if != usin->sin6_scope_id) {
307 fl6_sock_release(flowlabel);
310 sk->bound_dev_if = usin->sin6_scope_id;
311 if (!sk->bound_dev_if && (addr_type&IPV6_ADDR_MULTICAST))
312 fl.oif = np->mcast_oif;
315 /* Connect to link-local address requires an interface */
316 if (sk->bound_dev_if == 0)
320 ipv6_addr_copy(&np->daddr, daddr);
321 np->flow_label = fl.fl6_flowlabel;
323 sk->dport = usin->sin6_port;
326 * Check for a route to destination an obtain the
327 * destination cache for it.
/* Fill the flow descriptor used for routing. */
330 fl.proto = IPPROTO_UDP;
331 fl.fl6_dst = &np->daddr;
333 fl.oif = sk->bound_dev_if;
334 fl.uli_u.ports.dport = sk->dport;
335 fl.uli_u.ports.sport = sk->sport;
337 if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
338 fl.oif = np->mcast_oif;
/* With a type-0 source route, route to the first hop instead of the
 * final destination (flow-label options take precedence). */
341 if (flowlabel->opt && flowlabel->opt->srcrt) {
342 struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
343 fl.fl6_dst = rt0->addr;
345 } else if (np->opt && np->opt->srcrt) {
346 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
347 fl.fl6_dst = rt0->addr;
350 dst = ip6_route_output(sk, &fl);
352 if ((err = dst->error) != 0) {
354 fl6_sock_release(flowlabel);
358 ip6_dst_store(sk, dst, fl.fl6_dst);
360 /* get the source address used in the appropriate device */
362 err = ipv6_get_saddr(dst, daddr, &saddr);
/* Adopt the chosen source address if none was bound yet; rcv_saddr's
 * IPv4 view is set to a loopback marker for the v4 lookup path. */
365 if(ipv6_addr_any(&np->saddr))
366 ipv6_addr_copy(&np->saddr, &saddr);
368 if(ipv6_addr_any(&np->rcv_saddr)) {
369 ipv6_addr_copy(&np->rcv_saddr, &saddr);
370 sk->rcv_saddr = LOOPBACK4_IPV6;
372 sk->state = TCP_ESTABLISHED;
374 fl6_sock_release(flowlabel);
/* udpv6_close - close callback: UDP has no shutdown handshake, so just
 * release the socket (the @timeout argument is unused here). */
379 static void udpv6_close(struct sock *sk, long timeout)
381 inet_sock_release(sk);
385 * This should be easy, if there is something there we
386 * return it, otherwise we block.
/*
 * udpv6_recvmsg - receive one datagram on a UDPv6 socket.
 *
 * Dequeues a datagram (blocking unless @noblock/MSG_DONTWAIT), copies the
 * payload after the UDP header into the user iovec — verifying the checksum
 * on the copy when the hardware did not already do so — fills in the peer
 * sockaddr_in6 (handling v4-mapped packets arriving as ETH_P_IP), and
 * delivers ancillary data.  Returns the number of bytes copied (possibly
 * truncated, with MSG_TRUNC set) or a negative errno.
 *
 * NOTE(review): the csum_copy_err unwind path at the bottom is partially
 * elided in this extract; verify the MSG_PEEK requeue logic in full source.
 */
389 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
390 int noblock, int flags, int *addr_len)
396 *addr_len=sizeof(struct sockaddr_in6);
/* Socket error queue (IPV6_RECVERR) takes priority over data. */
398 if (flags & MSG_ERRQUEUE)
399 return ipv6_recv_error(sk, msg, len);
401 skb = skb_recv_datagram(sk, flags, noblock, &err);
405 copied = skb->len - sizeof(struct udphdr);
408 msg->msg_flags |= MSG_TRUNC;
/* Three copy strategies: plain copy when checksum is already verified,
 * verify-whole-then-copy when truncating, else copy-and-checksum in one pass. */
411 if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
412 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
414 } else if (msg->msg_flags&MSG_TRUNC) {
415 if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
417 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
420 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
427 sock_recv_timestamp(msg, sk, skb);
429 /* Copy the address. */
431 struct sockaddr_in6 *sin6;
433 sin6 = (struct sockaddr_in6 *) msg->msg_name;
434 sin6->sin6_family = AF_INET6;
435 sin6->sin6_port = skb->h.uh->source;
436 sin6->sin6_flowinfo = 0;
437 sin6->sin6_scope_id = 0;
/* Packet arrived over IPv4 (dual-stack socket): report the sender as a
 * v4-mapped address and run the IPv4 cmsg path. */
439 if (skb->protocol == htons(ETH_P_IP)) {
440 ipv6_addr_set(&sin6->sin6_addr, 0, 0,
441 htonl(0xffff), skb->nh.iph->saddr);
442 if (sk->protinfo.af_inet.cmsg_flags)
443 ip_cmsg_recv(msg, skb);
445 memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
446 sizeof(struct in6_addr));
448 if (sk->net_pinfo.af_inet6.rxopt.all)
449 datagram_recv_ctl(sk, msg, skb);
/* Link-local sender: expose the receiving interface as the scope id. */
450 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
451 struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
452 sin6->sin6_scope_id = opt->iif;
459 skb_free_datagram(sk, skb);
/* Checksum failure during copy: on MSG_PEEK the skb was left on the
 * queue — unlink the corrupt datagram so it is not returned again. */
465 if (flags&MSG_PEEK) {
467 spin_lock_irq(&sk->receive_queue.lock);
468 if (skb == skb_peek(&sk->receive_queue)) {
469 __skb_unlink(skb, &sk->receive_queue);
472 spin_unlock_irq(&sk->receive_queue.lock);
477 /* Error for blocking case is chosen to masquerade
478 as some normal condition.
480 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
481 UDP6_INC_STATS_USER(UdpInErrors);
/*
 * udpv6_err - ICMPv6 error handler for UDP.
 * @skb: the ICMPv6 packet; the offending IPv6 header starts at skb->data
 *       and the embedded UDP header at @offset.
 *
 * Looks up the socket that sent the offending datagram and either queues
 * the error for IPV6_RECVERR readers or reports it via sk->err — errors on
 * unconnected sockets without IPV6_RECVERR are dropped silently.
 */
485 void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
486 int type, int code, int offset, __u32 info)
488 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
489 struct net_device *dev = skb->dev;
490 struct in6_addr *saddr = &hdr->saddr;
491 struct in6_addr *daddr = &hdr->daddr;
492 struct udphdr *uh = (struct udphdr*)(skb->data+offset);
/* The embedded packet's dst/src are our local/remote addresses. */
496 sk = udp_v6_lookup(daddr, uh->dest, saddr, uh->source, dev->ifindex);
/* Ignore errors that don't map to an errno unless the app asked for raw
 * error reporting via IPV6_RECVERR. */
501 if (!icmpv6_err_convert(type, code, &err) &&
502 !sk->net_pinfo.af_inet6.recverr)
505 if (sk->state!=TCP_ESTABLISHED &&
506 !sk->net_pinfo.af_inet6.recverr)
509 if (sk->net_pinfo.af_inet6.recverr)
510 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
513 sk->error_report(sk);
/*
 * udpv6_queue_rcv_skb - final delivery of a datagram to a socket.
 *
 * If a socket filter is attached and the checksum is still unverified,
 * verify it first (a filter may inspect the payload), then queue the skb
 * on the socket receive queue, updating UDP/IPv6 MIB counters on both the
 * success and drop paths.  BH context.
 */
518 static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
520 #if defined(CONFIG_FILTER)
521 if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
522 if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
523 UDP6_INC_STATS_BH(UdpInErrors);
524 IP6_INC_STATS_BH(Ip6InDiscards);
/* Checksum now known good — don't re-verify at recvmsg time. */
528 skb->ip_summed = CHECKSUM_UNNECESSARY;
531 if (sock_queue_rcv_skb(sk,skb)<0) {
532 UDP6_INC_STATS_BH(UdpInErrors);
533 IP6_INC_STATS_BH(Ip6InDiscards);
537 IP6_INC_STATS_BH(Ip6InDelivers);
538 UDP6_INC_STATS_BH(UdpInDatagrams);
/*
 * udp_v6_mcast_next - find the next socket on a hash chain that should
 * receive a multicast datagram.
 * @sk: chain position to start from; @loc_port/@loc_addr: packet dest;
 * @rmt_port/@rmt_addr: packet source; @dif: receiving device ifindex.
 *
 * A socket matches when its bound port, any connected remote address/port,
 * any device binding, and either its bound local address or its multicast
 * group membership (inet6_mc_check) agree with the packet.  Returns the
 * matching socket or NULL at end of chain.
 */
542 static struct sock *udp_v6_mcast_next(struct sock *sk,
543 u16 loc_port, struct in6_addr *loc_addr,
544 u16 rmt_port, struct in6_addr *rmt_addr,
548 unsigned short num = ntohs(loc_port);
549 for(; s; s = s->next) {
551 struct ipv6_pinfo *np = &s->net_pinfo.af_inet6;
/* Connected sockets must match the packet's source port/address. */
553 if(s->dport != rmt_port)
556 if(!ipv6_addr_any(&np->daddr) &&
557 ipv6_addr_cmp(&np->daddr, rmt_addr))
560 if (s->bound_dev_if && s->bound_dev_if != dif)
/* Bound local address matches the multicast destination directly;
 * otherwise fall back to a group-membership check. */
563 if(!ipv6_addr_any(&np->rcv_saddr)) {
564 if(ipv6_addr_cmp(&np->rcv_saddr, loc_addr) == 0)
567 if(!inet6_mc_check(s, loc_addr))
576 * Note: called only from the BH handler context,
577 * so we don't need to lock the hashes.
/*
 * udpv6_mcast_deliver - deliver a multicast datagram to every matching socket.
 *
 * Walks the destination port's hash chain with udp_v6_mcast_next(); each
 * additional recipient gets a clone of @skb, and the original skb goes to
 * the last matching socket (avoiding one clone).  Queue-full drops are
 * silently tolerated for clones.
 */
579 static void udpv6_mcast_deliver(struct udphdr *uh,
580 struct in6_addr *saddr, struct in6_addr *daddr,
583 struct sock *sk, *sk2;
584 struct sk_buff *buff;
587 read_lock(&udp_hash_lock);
588 sk = udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)];
589 dif = skb->dev->ifindex;
590 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
/* Clone for every further recipient beyond the first match. */
596 while((sk2 = udp_v6_mcast_next(sk2->next, uh->dest, daddr,
597 uh->source, saddr, dif))) {
599 buff = skb_clone(skb, GFP_ATOMIC);
603 if (sock_queue_rcv_skb(sk2, buff) >= 0)
/* The original skb is consumed by the first matching socket. */
608 if (sock_queue_rcv_skb(sk, skb) < 0) {
612 read_unlock(&udp_hash_lock);
/*
 * udpv6_rcv - IPv6 input entry point for UDP datagrams.
 *
 * Validates the UDP header and length, rejects zero checksums (mandatory
 * for UDPv6 per RFC 2460), trims padding, verifies or defers the checksum
 * depending on skb->ip_summed, then dispatches: multicast destinations go
 * to udpv6_mcast_deliver(), unicast to the socket found by udp_v6_lookup(),
 * and unmatched packets elicit an ICMPv6 port-unreachable.
 *
 * NOTE(review): several gotos/returns and the error labels are elided in
 * this extract (embedded line numbers are non-contiguous); verify flow in
 * the full source.
 */
615 int udpv6_rcv(struct sk_buff *skb)
619 struct net_device *dev = skb->dev;
620 struct in6_addr *saddr, *daddr;
623 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
626 saddr = &skb->nh.ipv6h->saddr;
627 daddr = &skb->nh.ipv6h->daddr;
630 ulen = ntohs(uh->len);
632 /* Check for jumbo payload */
/* The UDP length must fit in the skb and cover at least the header. */
636 if (ulen > skb->len || ulen < sizeof(*uh))
/* A zero checksum is illegal for UDP over IPv6 (unlike IPv4). */
639 if (uh->check == 0) {
640 /* IPv6 draft-v2 section 8.1 says that we SHOULD log
641 this error. Well, it is reasonable.
644 printk(KERN_INFO "IPv6: udp checksum is 0\n");
/* Trim trailing padding; re-read header pointers, as trimming the skb
 * may have reallocated its data. */
648 if (ulen < skb->len) {
649 if (__pskb_trim(skb, ulen))
651 saddr = &skb->nh.ipv6h->saddr;
652 daddr = &skb->nh.ipv6h->daddr;
/* Hardware-computed checksum: fold in the pseudo-header now; on failure
 * fall back to a full software verification later. */
656 if (skb->ip_summed==CHECKSUM_HW) {
657 skb->ip_summed = CHECKSUM_UNNECESSARY;
658 if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) {
659 NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp v6 hw csum failure.\n"));
660 skb->ip_summed = CHECKSUM_NONE;
/* Seed the deferred checksum with the pseudo-header complement. */
663 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
664 skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0);
667 * Multicast receive code
669 if (ipv6_addr_type(daddr) & IPV6_ADDR_MULTICAST) {
670 udpv6_mcast_deliver(uh, saddr, daddr, skb);
677 * check socket cache ... must talk to Alan about his plans
678 * for sock caches... i'll skip this for now.
680 sk = udp_v6_lookup(saddr, uh->source, daddr, uh->dest, dev->ifindex);
/* No socket: verify the checksum before answering, so corrupted packets
 * don't trigger spurious ICMP errors. */
683 if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
684 (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
686 UDP6_INC_STATS_BH(UdpNoPorts);
688 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
696 udpv6_queue_rcv_skb(sk, skb);
/* Error labels: short-packet diagnostics and MIB accounting. */
702 printk(KERN_DEBUG "UDP: short packet: %d/%u\n", ulen, skb->len);
705 UDP6_INC_STATS_BH(UdpInErrors);
720 struct in6_addr *daddr;
/*
 * udpv6_getfrag - fragment-fill callback passed to ip6_build_xmit().
 * @data: a struct udpv6fakehdr carrying the UDP header under construction,
 *        the user iovec, and a running checksum (wcheck).
 * @addr: source address, used for the pseudo-header checksum.
 * @buff/@offset/@len: destination buffer, offset within the UDP packet,
 *        and number of bytes to produce.
 *
 * Copies user data (checksumming as it goes); when filling the first
 * fragment (offset 0) it finalizes the UDP checksum over the pseudo-header
 * and writes the completed UDP header in front of the payload.
 */
727 static int udpv6_getfrag(const void *data, struct in6_addr *addr,
728 char *buff, unsigned int offset, unsigned int len)
730 struct udpv6fakehdr *udh = (struct udpv6fakehdr *) data;
/* Offsets given by ip6_build_xmit include the UDP header; user data is
 * addressed relative to the start of the payload. */
738 offset -= sizeof(struct udphdr);
740 dst += sizeof(struct udphdr);
742 clen -= sizeof(struct udphdr);
745 if (csum_partial_copy_fromiovecend(dst, udh->iov, offset,
/* First fragment: fold the header into the checksum, apply the
 * pseudo-header, and emit the UDP header. */
750 struct in6_addr *daddr;
752 udh->wcheck = csum_partial((char *)udh, sizeof(struct udphdr),
759 * use packet destination address
760 * this should improve cache locality
764 udh->uh.check = csum_ipv6_magic(addr, daddr,
765 udh->pl_len, IPPROTO_UDP,
/* A computed checksum of 0 is transmitted as all-ones (RFC 768). */
767 if (udh->uh.check == 0)
770 memcpy(buff, udh, sizeof(struct udphdr));
/*
 * udpv6_sendmsg - transmit a UDP datagram of @ulen payload bytes.
 *
 * Determines the destination from msg_name (validating family, port and
 * flow label) or, for connected sockets, from the socket state; v4-mapped
 * destinations are rewritten to a sockaddr_in and handed to udp_sendmsg().
 * Processes ancillary data (datagram_send_ctl), merges flow-label options,
 * builds the flow descriptor, and emits the packet via ip6_build_xmit()
 * with udpv6_getfrag as the fragment filler.  Returns bytes sent or a
 * negative errno.
 *
 * NOTE(review): error returns, some gotos and closing braces are elided in
 * this extract; verify control flow against the full source.
 */
775 static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
777 struct ipv6_txoptions opt_space;
778 struct udpv6fakehdr udh;
779 struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
780 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
781 struct ipv6_txoptions *opt = NULL;
782 struct ip6_flowlabel *flowlabel = NULL;
784 int addr_len = msg->msg_namelen;
785 struct in6_addr *daddr;
786 int len = ulen + sizeof(struct udphdr);
792 /* Rough check on arithmetic overflow,
793 better check is made in ip6_build_xmit
795 if (ulen < 0 || ulen > INT_MAX - sizeof(struct udphdr))
798 fl.fl6_flowlabel = 0;
/* Explicit AF_INET destination: only valid on dual-stack sockets. */
802 if (sin6->sin6_family == AF_INET) {
803 if (__ipv6_only_sock(sk))
805 return udp_sendmsg(sk, msg, ulen);
808 if (addr_len < SIN6_LEN_RFC2133)
811 if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
814 if (sin6->sin6_port == 0)
817 udh.uh.dest = sin6->sin6_port;
818 daddr = &sin6->sin6_addr;
/* A flow label in sin6_flowinfo may supply the destination address. */
821 fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
822 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
823 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
824 if (flowlabel == NULL)
826 daddr = &flowlabel->dst;
830 /* Otherwise it will be difficult to maintain sk->dst_cache. */
831 if (sk->state == TCP_ESTABLISHED &&
832 !ipv6_addr_cmp(daddr, &sk->net_pinfo.af_inet6.daddr))
833 daddr = &sk->net_pinfo.af_inet6.daddr;
/* Honour sin6_scope_id for link-local destinations. */
835 if (addr_len >= sizeof(struct sockaddr_in6) &&
836 sin6->sin6_scope_id &&
837 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
838 fl.oif = sin6->sin6_scope_id;
/* No msg_name: socket must be connected; take destination from state. */
840 if (sk->state != TCP_ESTABLISHED)
843 udh.uh.dest = sk->dport;
844 daddr = &sk->net_pinfo.af_inet6.daddr;
845 fl.fl6_flowlabel = np->flow_label;
848 addr_type = ipv6_addr_type(daddr);
/* V4-mapped destination: rewrite msg_name and delegate to the v4 path. */
850 if (addr_type == IPV6_ADDR_MAPPED) {
851 struct sockaddr_in sin;
853 if (__ipv6_only_sock(sk))
856 sin.sin_family = AF_INET;
857 sin.sin_addr.s_addr = daddr->s6_addr32[3];
858 sin.sin_port = udh.uh.dest;
859 msg->msg_name = (struct sockaddr *)(&sin);
860 msg->msg_namelen = sizeof(sin);
861 fl6_sock_release(flowlabel);
863 return udp_sendmsg(sk, msg, ulen);
868 fl.oif = sk->bound_dev_if;
/* Ancillary data may add tx options, an outgoing interface, a hop limit
 * or a (new) flow label. */
871 if (msg->msg_controllen) {
873 memset(opt, 0, sizeof(struct ipv6_txoptions));
875 err = datagram_send_ctl(msg, &fl, opt, &hlimit);
877 fl6_sock_release(flowlabel);
880 if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
881 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
882 if (flowlabel == NULL)
885 if (!(opt->opt_nflen|opt->opt_flen))
891 opt = fl6_merge_options(&opt_space, flowlabel, opt);
892 if (opt && opt->srcrt)
/* Fill in the pseudo UDP header; a 16-bit length of 0 marks a jumbogram. */
895 udh.uh.source = sk->sport;
896 udh.uh.len = len < 0x10000 ? htons(len) : 0;
898 udh.iov = msg->msg_iov;
902 fl.proto = IPPROTO_UDP;
904 if (fl.fl6_src == NULL && !ipv6_addr_any(&np->saddr))
905 fl.fl6_src = &np->saddr;
906 fl.uli_u.ports.dport = udh.uh.dest;
907 fl.uli_u.ports.sport = udh.uh.source;
909 err = ip6_build_xmit(sk, udpv6_getfrag, &udh, &fl, len, opt, hlimit,
912 fl6_sock_release(flowlabel);
917 UDP6_INC_STATS_USER(UdpOutDatagrams);
/* IPv6 protocol descriptor registering UDP's receive and error handlers
 * with the inet6 stack (positional initializers, pre-C99 kernel style). */
921 static struct inet6_protocol udpv6_protocol =
923 udpv6_rcv, /* UDP handler */
924 udpv6_err, /* UDP error control */
926 IPPROTO_UDP, /* protocol ID */
933 #define LINE_FMT "%-190s\n"
/*
 * get_udp6_sock - format one socket as a /proc/net/udp6 row into @tmpbuf.
 * @i: row index printed in the first column.
 * Layout mirrors the IPv4 /proc/net/udp format with 128-bit addresses.
 */
935 static void get_udp6_sock(struct sock *sp, char *tmpbuf, int i)
937 struct in6_addr *dest, *src;
940 dest = &sp->net_pinfo.af_inet6.daddr;
941 src = &sp->net_pinfo.af_inet6.rcv_saddr;
942 destp = ntohs(sp->dport);
943 srcp = ntohs(sp->sport);
945 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
946 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
948 src->s6_addr32[0], src->s6_addr32[1],
949 src->s6_addr32[2], src->s6_addr32[3], srcp,
950 dest->s6_addr32[0], dest->s6_addr32[1],
951 dest->s6_addr32[2], dest->s6_addr32[3], destp,
953 atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
957 atomic_read(&sp->refcnt), sp);
/*
 * udp6_get_info - classic procfs read handler for /proc/net/udp6.
 *
 * Emits a header line, then one line per AF_INET6 socket in udp_hash,
 * honouring the @offset/@length window (the seek/window bookkeeping lines
 * are partially elided in this extract).  Returns the number of bytes
 * placed in @buffer and sets *@start for the procfs core.
 */
962 int udp6_get_info(char *buffer, char **start, off_t offset, int length)
965 int len = 0, num = 0, i;
967 char tmpbuf[LINE_LEN+2];
969 if (offset < LINE_LEN+1)
968 len += sprintf(buffer, LINE_FMT,
970 "local_address " /* 38 */
971 "remote_address " /* 38 */
972 "st tx_queue rx_queue tr tm->when retrnsmt" /* 41 */
973 " uid timeout inode"); /* 21 */
977 read_lock(&udp_hash_lock);
/* Walk every hash chain; only IPv6 sockets belong in this table. */
978 for (i = 0; i < UDP_HTABLE_SIZE; i++) {
981 for (sk = udp_hash[i]; sk; sk = sk->next, num++) {
982 if (sk->family != PF_INET6)
987 get_udp6_sock(sk, tmpbuf, i);
988 len += sprintf(buffer+len, LINE_FMT, tmpbuf);
994 read_unlock(&udp_hash_lock);
/* Translate the absolute position back into this buffer's window. */
995 begin = len - (pos - offset);
996 *start = buffer + begin;
/* UDPv6 proto operations table (GNU designated-initializer syntax).
 * Mixes IPv6-specific handlers with shared IPv4/IPv6 ones (udp_disconnect). */
1005 struct proto udpv6_prot = {
1008 connect: udpv6_connect,
1009 disconnect: udp_disconnect,
1011 destroy: inet6_destroy_sock,
1012 setsockopt: ipv6_setsockopt,
1013 getsockopt: ipv6_getsockopt,
1014 sendmsg: udpv6_sendmsg,
1015 recvmsg: udpv6_recvmsg,
1016 backlog_rcv: udpv6_queue_rcv_skb,
1018 unhash: udp_v6_unhash,
1019 get_port: udp_v6_get_port,
1022 extern struct proto_ops inet6_dgram_ops;
/* socket() switch entry: maps SOCK_DGRAM/IPPROTO_UDP on AF_INET6 to
 * udpv6_prot and the shared inet6 datagram ops; PERMANENT = not unloadable. */
1024 static struct inet_protosw udpv6_protosw = {
1026 protocol: IPPROTO_UDP,
1028 ops: &inet6_dgram_ops,
1030 no_check: UDP_CSUM_DEFAULT,
1031 flags: INET_PROTOSW_PERMANENT,
/* udpv6_init - boot-time registration of the UDPv6 protocol handler and
 * its socket-creation switch entry. */
1035 void __init udpv6_init(void)
1037 inet6_add_protocol(&udpv6_protocol);
1038 inet6_register_protosw(&udpv6_protosw);