3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
17 * to a single port at the same time (see the usage sketch below this header).
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
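/*
 * Illustrative userspace sketch (not part of this file; a minimal example of
 * the IPV6_V6ONLY behaviour noted above, with the helper name and port number
 * chosen purely for illustration). With IPV6_V6ONLY enabled on the AF_INET6
 * listener, a separate AF_INET socket may bind the same port; with it
 * disabled, the single IPv6 socket also accepts IPv4 clients as v4-mapped
 * addresses.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int open_v6only_listener(unsigned short port)
 *	{
 *		struct sockaddr_in6 a6 = { .sin6_family = AF_INET6,
 *					   .sin6_port = htons(port) };
 *		int on = 1;
 *		int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		// sin6_addr is left all-zero, i.e. the unspecified address "::"
 *		setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
 *		if (bind(fd, (struct sockaddr *)&a6, sizeof(a6)) || listen(fd, 16))
 *			return -1;
 *		return fd;
 *	}
 */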
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
72 #include <trace/events/tcp.h>
74 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
75 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 static const struct inet_connection_sock_af_ops ipv6_specific;
82 #ifdef CONFIG_TCP_MD5SIG
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
86 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
87 const struct in6_addr *addr)
93 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
95 struct dst_entry *dst = skb_dst(skb);
97 if (dst && dst_hold_safe(dst)) {
98 const struct rt6_info *rt = (const struct rt6_info *)dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
108 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
111 tcp_hdr(skb)->source);
114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
116 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117 ipv6_hdr(skb)->saddr.s6_addr32);
120 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
123 /* This check is replicated from tcp_v6_connect() and intended to
124 * prevent BPF program called below from accessing bytes that are out
125 * of the bound specified by user in addr_len.
127 if (addr_len < SIN6_LEN_RFC2133)
130 sock_owned_by_me(sk);
132 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
135 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
138 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
139 struct inet_sock *inet = inet_sk(sk);
140 struct inet_connection_sock *icsk = inet_csk(sk);
141 struct ipv6_pinfo *np = inet6_sk(sk);
142 struct tcp_sock *tp = tcp_sk(sk);
143 struct in6_addr *saddr = NULL, *final_p, final;
144 struct ipv6_txoptions *opt;
146 struct dst_entry *dst;
149 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
151 if (addr_len < SIN6_LEN_RFC2133)
154 if (usin->sin6_family != AF_INET6)
155 return -EAFNOSUPPORT;
157 memset(&fl6, 0, sizeof(fl6));
160 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
161 IP6_ECN_flow_init(fl6.flowlabel);
162 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
163 struct ip6_flowlabel *flowlabel;
164 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
167 fl6_sock_release(flowlabel);
172 * connect() to INADDR_ANY means loopback (BSD'ism).
175 if (ipv6_addr_any(&usin->sin6_addr)) {
176 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
177 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
180 usin->sin6_addr = in6addr_loopback;
183 addr_type = ipv6_addr_type(&usin->sin6_addr);
185 if (addr_type & IPV6_ADDR_MULTICAST)
188 if (addr_type&IPV6_ADDR_LINKLOCAL) {
189 if (addr_len >= sizeof(struct sockaddr_in6) &&
190 usin->sin6_scope_id) {
191 /* If an interface was set while binding, the indices must coincide. */
194 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
197 sk->sk_bound_dev_if = usin->sin6_scope_id;
200 /* Connecting to a link-local address requires an interface */
201 if (!sk->sk_bound_dev_if)
205 if (tp->rx_opt.ts_recent_stamp &&
206 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
207 tp->rx_opt.ts_recent = 0;
208 tp->rx_opt.ts_recent_stamp = 0;
212 sk->sk_v6_daddr = usin->sin6_addr;
213 np->flow_label = fl6.flowlabel;
219 if (addr_type & IPV6_ADDR_MAPPED) {
220 u32 exthdrlen = icsk->icsk_ext_hdr_len;
221 struct sockaddr_in sin;
223 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
225 if (__ipv6_only_sock(sk))
228 sin.sin_family = AF_INET;
229 sin.sin_port = usin->sin6_port;
230 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
232 icsk->icsk_af_ops = &ipv6_mapped;
233 sk->sk_backlog_rcv = tcp_v4_do_rcv;
234 #ifdef CONFIG_TCP_MD5SIG
235 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
238 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
241 icsk->icsk_ext_hdr_len = exthdrlen;
242 icsk->icsk_af_ops = &ipv6_specific;
243 sk->sk_backlog_rcv = tcp_v6_do_rcv;
244 #ifdef CONFIG_TCP_MD5SIG
245 tp->af_specific = &tcp_sock_ipv6_specific;
249 np->saddr = sk->sk_v6_rcv_saddr;
254 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
255 saddr = &sk->sk_v6_rcv_saddr;
257 fl6.flowi6_proto = IPPROTO_TCP;
258 fl6.daddr = sk->sk_v6_daddr;
259 fl6.saddr = saddr ? *saddr : np->saddr;
260 fl6.flowi6_oif = sk->sk_bound_dev_if;
261 fl6.flowi6_mark = sk->sk_mark;
262 fl6.fl6_dport = usin->sin6_port;
263 fl6.fl6_sport = inet->inet_sport;
264 fl6.flowi6_uid = sk->sk_uid;
266 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
267 final_p = fl6_update_dst(&fl6, opt, &final);
269 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
271 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
279 sk->sk_v6_rcv_saddr = *saddr;
282 /* set the source address */
284 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
286 sk->sk_gso_type = SKB_GSO_TCPV6;
287 ip6_dst_store(sk, dst, NULL, NULL);
289 icsk->icsk_ext_hdr_len = 0;
291 icsk->icsk_ext_hdr_len = opt->opt_flen +
294 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
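/* i.e. 1280 - 40 - 20 = 1220 bytes: the MSS is clamped to what the IPv6
 * minimum MTU can always carry after the 40-byte IPv6 and 20-byte TCP
 * base headers.
 */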
296 inet->inet_dport = usin->sin6_port;
298 tcp_set_state(sk, TCP_SYN_SENT);
299 err = inet6_hash_connect(tcp_death_row, sk);
305 if (likely(!tp->repair)) {
307 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
308 sk->sk_v6_daddr.s6_addr32,
311 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
313 sk->sk_v6_daddr.s6_addr32);
316 if (tcp_fastopen_defer_connect(sk, &err))
321 err = tcp_connect(sk);
328 tcp_set_state(sk, TCP_CLOSE);
330 inet->inet_dport = 0;
331 sk->sk_route_caps = 0;
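/*
 * Illustrative userspace sketch (not part of this file): connecting an
 * AF_INET6 socket to an IPv4-mapped address exercises the IPV6_ADDR_MAPPED
 * branch above, which falls back to tcp_v4_connect(). The helper name,
 * destination address and port below are examples only.
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int connect_v4_mapped(void)
 *	{
 *		struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *					    .sin6_port = htons(80) };
 *		int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		// "::ffff:192.0.2.1" is a v4-mapped IPv6 address (example value)
 *		inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);
 *		return connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	}
 */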
335 static void tcp_v6_mtu_reduced(struct sock *sk)
337 struct dst_entry *dst;
339 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
342 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
346 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
347 tcp_sync_mss(sk, dst_mtu(dst));
348 tcp_simple_retransmit(sk);
352 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
353 u8 type, u8 code, int offset, __be32 info)
355 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
356 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
357 struct net *net = dev_net(skb->dev);
358 struct request_sock *fastopen;
359 struct ipv6_pinfo *np;
366 sk = __inet6_lookup_established(net, &tcp_hashinfo,
367 &hdr->daddr, th->dest,
368 &hdr->saddr, ntohs(th->source),
369 skb->dev->ifindex, inet6_sdif(skb));
372 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
377 if (sk->sk_state == TCP_TIME_WAIT) {
378 inet_twsk_put(inet_twsk(sk));
381 seq = ntohl(th->seq);
382 fatal = icmpv6_err_convert(type, code, &err);
383 if (sk->sk_state == TCP_NEW_SYN_RECV)
384 return tcp_req_err(sk, seq, fatal);
387 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
388 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
390 if (sk->sk_state == TCP_CLOSE)
393 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
394 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
399 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
400 fastopen = tp->fastopen_rsk;
401 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
402 if (sk->sk_state != TCP_LISTEN &&
403 !between(seq, snd_una, tp->snd_nxt)) {
404 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
410 if (type == NDISC_REDIRECT) {
411 if (!sock_owned_by_user(sk)) {
412 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
415 dst->ops->redirect(dst, sk, skb);
420 if (type == ICMPV6_PKT_TOOBIG) {
421 /* We are not interested in TCP_LISTEN and open_requests
422 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
423 * they should go through unfragmented).
425 if (sk->sk_state == TCP_LISTEN)
428 if (!ip6_sk_accept_pmtu(sk))
431 tp->mtu_info = ntohl(info);
432 if (!sock_owned_by_user(sk))
433 tcp_v6_mtu_reduced(sk);
434 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
441 /* Might be for a request_sock */
442 switch (sk->sk_state) {
445 /* Only in fast or simultaneous open. If a fast open socket is
446 * already accepted, it is treated as a connected one below.
448 if (fastopen && !fastopen->sk)
451 if (!sock_owned_by_user(sk)) {
453 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
457 sk->sk_err_soft = err;
461 if (!sock_owned_by_user(sk) && np->recverr) {
463 sk->sk_error_report(sk);
465 sk->sk_err_soft = err;
473 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
475 struct request_sock *req,
476 struct tcp_fastopen_cookie *foc,
477 enum tcp_synack_type synack_type)
479 struct inet_request_sock *ireq = inet_rsk(req);
480 struct ipv6_pinfo *np = inet6_sk(sk);
481 struct ipv6_txoptions *opt;
482 struct flowi6 *fl6 = &fl->u.ip6;
486 /* First, grab a route. */
487 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
488 IPPROTO_TCP)) == NULL)
491 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
494 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
495 &ireq->ir_v6_rmt_addr);
497 fl6->daddr = ireq->ir_v6_rmt_addr;
498 if (np->repflow && ireq->pktopts)
499 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
502 opt = ireq->ipv6_opt;
504 opt = rcu_dereference(np->opt);
505 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
507 err = net_xmit_eval(err);
515 static void tcp_v6_reqsk_destructor(struct request_sock *req)
517 kfree(inet_rsk(req)->ipv6_opt);
518 kfree_skb(inet_rsk(req)->pktopts);
521 #ifdef CONFIG_TCP_MD5SIG
522 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
523 const struct in6_addr *addr)
525 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
528 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
529 const struct sock *addr_sk)
531 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
534 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
535 char __user *optval, int optlen)
537 struct tcp_md5sig cmd;
538 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
541 if (optlen < sizeof(cmd))
544 if (copy_from_user(&cmd, optval, sizeof(cmd)))
547 if (sin6->sin6_family != AF_INET6)
550 if (optname == TCP_MD5SIG_EXT &&
551 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
552 prefixlen = cmd.tcpm_prefixlen;
553 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
557 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
560 if (!cmd.tcpm_keylen) {
561 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
562 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
564 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
565 AF_INET6, prefixlen);
568 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
571 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
572 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
573 AF_INET, prefixlen, cmd.tcpm_key,
574 cmd.tcpm_keylen, GFP_KERNEL);
576 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
577 AF_INET6, prefixlen, cmd.tcpm_key,
578 cmd.tcpm_keylen, GFP_KERNEL);
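/*
 * Illustrative userspace sketch (not part of this file): the parser above is
 * reached through setsockopt(TCP_MD5SIG) / setsockopt(TCP_MD5SIG_EXT). The
 * helper name, peer address handling and key are examples only; struct
 * tcp_md5sig is assumed to come from <linux/tcp.h> (or a sufficiently recent
 * <netinet/tcp.h>), and keys are at most TCP_MD5SIG_MAXKEYLEN bytes.
 *
 *	#include <arpa/inet.h>
 *	#include <linux/tcp.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int set_md5_key(int fd, const char *peer, const char *key)
 *	{
 *		struct tcp_md5sig md5 = {};
 *		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *		sin6->sin6_family = AF_INET6;
 *		if (inet_pton(AF_INET6, peer, &sin6->sin6_addr) != 1 ||
 *		    strlen(key) > TCP_MD5SIG_MAXKEYLEN)
 *			return -1;
 *		md5.tcpm_keylen = strlen(key);
 *		memcpy(md5.tcpm_key, key, md5.tcpm_keylen);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *	}
 */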
581 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
582 const struct in6_addr *daddr,
583 const struct in6_addr *saddr,
584 const struct tcphdr *th, int nbytes)
586 struct tcp6_pseudohdr *bp;
587 struct scatterlist sg;
591 /* 1. TCP pseudo-header (RFC2460) */
594 bp->protocol = cpu_to_be32(IPPROTO_TCP);
595 bp->len = cpu_to_be32(nbytes);
597 _th = (struct tcphdr *)(bp + 1);
598 memcpy(_th, th, sizeof(*th));
601 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
602 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
603 sizeof(*bp) + sizeof(*th));
604 return crypto_ahash_update(hp->md5_req);
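/* For reference: the pseudo-header hashed above carries the source and
 * destination IPv6 addresses, the segment length and the protocol number
 * (struct tcp6_pseudohdr), followed by a copy of the TCP header; per
 * RFC 2385 the TCP checksum is treated as zero for this computation.
 */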
607 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
608 const struct in6_addr *daddr, struct in6_addr *saddr,
609 const struct tcphdr *th)
611 struct tcp_md5sig_pool *hp;
612 struct ahash_request *req;
614 hp = tcp_get_md5sig_pool();
616 goto clear_hash_noput;
619 if (crypto_ahash_init(req))
621 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
623 if (tcp_md5_hash_key(hp, key))
625 ahash_request_set_crypt(req, NULL, md5_hash, 0);
626 if (crypto_ahash_final(req))
629 tcp_put_md5sig_pool();
633 tcp_put_md5sig_pool();
635 memset(md5_hash, 0, 16);
639 static int tcp_v6_md5_hash_skb(char *md5_hash,
640 const struct tcp_md5sig_key *key,
641 const struct sock *sk,
642 const struct sk_buff *skb)
644 const struct in6_addr *saddr, *daddr;
645 struct tcp_md5sig_pool *hp;
646 struct ahash_request *req;
647 const struct tcphdr *th = tcp_hdr(skb);
649 if (sk) { /* valid for establish/request sockets */
650 saddr = &sk->sk_v6_rcv_saddr;
651 daddr = &sk->sk_v6_daddr;
653 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
654 saddr = &ip6h->saddr;
655 daddr = &ip6h->daddr;
658 hp = tcp_get_md5sig_pool();
660 goto clear_hash_noput;
663 if (crypto_ahash_init(req))
666 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
668 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
670 if (tcp_md5_hash_key(hp, key))
672 ahash_request_set_crypt(req, NULL, md5_hash, 0);
673 if (crypto_ahash_final(req))
676 tcp_put_md5sig_pool();
680 tcp_put_md5sig_pool();
682 memset(md5_hash, 0, 16);
688 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
689 const struct sk_buff *skb)
691 #ifdef CONFIG_TCP_MD5SIG
692 const __u8 *hash_location = NULL;
693 struct tcp_md5sig_key *hash_expected;
694 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
695 const struct tcphdr *th = tcp_hdr(skb);
699 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
700 hash_location = tcp_parse_md5sig_option(th);
702 /* We've parsed the options - do we have a hash? */
703 if (!hash_expected && !hash_location)
706 if (hash_expected && !hash_location) {
707 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
711 if (!hash_expected && hash_location) {
712 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
716 /* check the signature */
717 genhash = tcp_v6_md5_hash_skb(newhash,
721 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
722 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
723 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
724 genhash ? "failed" : "mismatch",
725 &ip6h->saddr, ntohs(th->source),
726 &ip6h->daddr, ntohs(th->dest));
733 static void tcp_v6_init_req(struct request_sock *req,
734 const struct sock *sk_listener,
737 struct inet_request_sock *ireq = inet_rsk(req);
738 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
740 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
741 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
743 /* So that link locals have meaning */
744 if (!sk_listener->sk_bound_dev_if &&
745 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
746 ireq->ir_iif = tcp_v6_iif(skb);
748 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
749 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
750 np->rxopt.bits.rxinfo ||
751 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
752 np->rxopt.bits.rxohlim || np->repflow)) {
753 refcount_inc(&skb->users);
758 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
760 const struct request_sock *req)
762 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
765 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
767 .obj_size = sizeof(struct tcp6_request_sock),
768 .rtx_syn_ack = tcp_rtx_synack,
769 .send_ack = tcp_v6_reqsk_send_ack,
770 .destructor = tcp_v6_reqsk_destructor,
771 .send_reset = tcp_v6_send_reset,
772 .syn_ack_timeout = tcp_syn_ack_timeout,
775 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
776 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
777 sizeof(struct ipv6hdr),
778 #ifdef CONFIG_TCP_MD5SIG
779 .req_md5_lookup = tcp_v6_md5_lookup,
780 .calc_md5_hash = tcp_v6_md5_hash_skb,
782 .init_req = tcp_v6_init_req,
783 #ifdef CONFIG_SYN_COOKIES
784 .cookie_init_seq = cookie_v6_init_sequence,
786 .route_req = tcp_v6_route_req,
787 .init_seq = tcp_v6_init_seq,
788 .init_ts_off = tcp_v6_init_ts_off,
789 .send_synack = tcp_v6_send_synack,
792 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
793 u32 ack, u32 win, u32 tsval, u32 tsecr,
794 int oif, struct tcp_md5sig_key *key, int rst,
795 u8 tclass, __be32 label)
797 const struct tcphdr *th = tcp_hdr(skb);
799 struct sk_buff *buff;
801 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
802 struct sock *ctl_sk = net->ipv6.tcp_sk;
803 unsigned int tot_len = sizeof(struct tcphdr);
804 struct dst_entry *dst;
808 tot_len += TCPOLEN_TSTAMP_ALIGNED;
809 #ifdef CONFIG_TCP_MD5SIG
811 tot_len += TCPOLEN_MD5SIG_ALIGNED;
814 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
819 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
821 t1 = skb_push(buff, tot_len);
822 skb_reset_transport_header(buff);
824 /* Swap the send and the receive. */
825 memset(t1, 0, sizeof(*t1));
826 t1->dest = th->source;
827 t1->source = th->dest;
828 t1->doff = tot_len / 4;
829 t1->seq = htonl(seq);
830 t1->ack_seq = htonl(ack);
831 t1->ack = !rst || !th->ack;
833 t1->window = htons(win);
835 topt = (__be32 *)(t1 + 1);
838 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
839 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
840 *topt++ = htonl(tsval);
841 *topt++ = htonl(tsecr);
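/* The three words above encode the 12-byte timestamp option: two NOPs for
 * alignment, then kind 8 / length 10, followed by TSval and TSecr.
 */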
844 #ifdef CONFIG_TCP_MD5SIG
846 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
847 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
848 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
849 &ipv6_hdr(skb)->saddr,
850 &ipv6_hdr(skb)->daddr, t1);
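/* That is: two NOPs for alignment, then kind 19 / length 18, with the
 * 16-byte MD5 digest written immediately after the option header.
 */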
854 memset(&fl6, 0, sizeof(fl6));
855 fl6.daddr = ipv6_hdr(skb)->saddr;
856 fl6.saddr = ipv6_hdr(skb)->daddr;
857 fl6.flowlabel = label;
859 buff->ip_summed = CHECKSUM_PARTIAL;
862 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
864 fl6.flowi6_proto = IPPROTO_TCP;
865 if (rt6_need_strict(&fl6.daddr) && !oif)
866 fl6.flowi6_oif = tcp_v6_iif(skb);
868 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
871 fl6.flowi6_oif = oif;
874 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
875 fl6.fl6_dport = t1->dest;
876 fl6.fl6_sport = t1->source;
877 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
878 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
880 /* Pass a socket to ip6_dst_lookup even when it is for an RST;
881 * the underlying function will use it to retrieve the network namespace.
884 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
886 skb_dst_set(buff, dst);
887 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
888 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
890 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
897 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
899 const struct tcphdr *th = tcp_hdr(skb);
900 u32 seq = 0, ack_seq = 0;
901 struct tcp_md5sig_key *key = NULL;
902 #ifdef CONFIG_TCP_MD5SIG
903 const __u8 *hash_location = NULL;
904 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
905 unsigned char newhash[16];
907 struct sock *sk1 = NULL;
914 /* If sk is not NULL, it means we did a successful lookup and the incoming
915 * route had to be correct. The prequeue might have dropped our dst.
917 if (!sk && !ipv6_unicast_destination(skb))
920 #ifdef CONFIG_TCP_MD5SIG
922 hash_location = tcp_parse_md5sig_option(th);
923 if (sk && sk_fullsock(sk)) {
924 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
925 } else if (hash_location) {
927 * active side is lost. Try to find the listening socket through the
928 * source port, and then find the MD5 key through the listening socket.
929 * We do not lose security here:
930 * the incoming packet is checked with the MD5 hash of the key we find,
931 * and no RST is generated if the MD5 hash doesn't match.
933 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
934 &tcp_hashinfo, NULL, 0,
936 th->source, &ipv6h->daddr,
937 ntohs(th->source), tcp_v6_iif(skb),
942 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
946 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
947 if (genhash || memcmp(hash_location, newhash, 16) != 0)
953 seq = ntohl(th->ack_seq);
955 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
959 oif = sk->sk_bound_dev_if;
961 trace_tcp_send_reset(sk, skb);
964 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
966 #ifdef CONFIG_TCP_MD5SIG
972 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
973 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
974 struct tcp_md5sig_key *key, u8 tclass,
977 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
981 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
983 struct inet_timewait_sock *tw = inet_twsk(sk);
984 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
986 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
987 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
988 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
989 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
990 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
995 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
996 struct request_sock *req)
998 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
999 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1002 * The window field (SEG.WND) of every outgoing segment, with the
1003 * exception of <SYN> segments, MUST be right-shifted by
1004 * Rcv.Wind.Shift bits:
1006 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1007 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1008 tcp_rsk(req)->rcv_nxt,
1009 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1010 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1011 req->ts_recent, sk->sk_bound_dev_if,
1012 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
1017 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1019 #ifdef CONFIG_SYN_COOKIES
1020 const struct tcphdr *th = tcp_hdr(skb);
1023 sk = cookie_v6_check(sk, skb);
1028 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1030 if (skb->protocol == htons(ETH_P_IP))
1031 return tcp_v4_conn_request(sk, skb);
1033 if (!ipv6_unicast_destination(skb))
1036 return tcp_conn_request(&tcp6_request_sock_ops,
1037 &tcp_request_sock_ipv6_ops, sk, skb);
1041 return 0; /* don't send reset */
1044 static void tcp_v6_restore_cb(struct sk_buff *skb)
1046 /* We need to move header back to the beginning if xfrm6_policy_check()
1047 * and tcp_v6_fill_cb() are going to be called again.
1048 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1050 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1051 sizeof(struct inet6_skb_parm));
1054 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1055 struct request_sock *req,
1056 struct dst_entry *dst,
1057 struct request_sock *req_unhash,
1060 struct inet_request_sock *ireq;
1061 struct ipv6_pinfo *newnp;
1062 const struct ipv6_pinfo *np = inet6_sk(sk);
1063 struct ipv6_txoptions *opt;
1064 struct tcp6_sock *newtcp6sk;
1065 struct inet_sock *newinet;
1066 struct tcp_sock *newtp;
1068 #ifdef CONFIG_TCP_MD5SIG
1069 struct tcp_md5sig_key *key;
1073 if (skb->protocol == htons(ETH_P_IP)) {
1078 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1079 req_unhash, own_req);
1084 newtcp6sk = (struct tcp6_sock *)newsk;
1085 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1087 newinet = inet_sk(newsk);
1088 newnp = inet6_sk(newsk);
1089 newtp = tcp_sk(newsk);
1091 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1093 newnp->saddr = newsk->sk_v6_rcv_saddr;
1095 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1096 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1097 #ifdef CONFIG_TCP_MD5SIG
1098 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1101 newnp->ipv6_mc_list = NULL;
1102 newnp->ipv6_ac_list = NULL;
1103 newnp->ipv6_fl_list = NULL;
1104 newnp->pktoptions = NULL;
1106 newnp->mcast_oif = tcp_v6_iif(skb);
1107 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1108 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1110 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1113 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1114 * here, tcp_create_openreq_child now does this for us, see the comment in
1115 * that function for the gory details. -acme
1118 /* This is a tricky place. Until this moment, IPv4 TCP
1119 worked with the IPv6 icsk.icsk_af_ops. Sync it now.
1122 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1127 ireq = inet_rsk(req);
1129 if (sk_acceptq_is_full(sk))
1133 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1138 newsk = tcp_create_openreq_child(sk, req, skb);
1143 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1144 * count here, tcp_create_openreq_child now does this for us, see the
1145 * comment in that function for the gory details. -acme
1148 newsk->sk_gso_type = SKB_GSO_TCPV6;
1149 ip6_dst_store(newsk, dst, NULL, NULL);
1150 inet6_sk_rx_dst_set(newsk, skb);
1152 newtcp6sk = (struct tcp6_sock *)newsk;
1153 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1155 newtp = tcp_sk(newsk);
1156 newinet = inet_sk(newsk);
1157 newnp = inet6_sk(newsk);
1159 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1161 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1162 newnp->saddr = ireq->ir_v6_loc_addr;
1163 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1164 newsk->sk_bound_dev_if = ireq->ir_iif;
1166 /* Now IPv6 options...
1168 First: no IPv4 options.
1170 newinet->inet_opt = NULL;
1171 newnp->ipv6_mc_list = NULL;
1172 newnp->ipv6_ac_list = NULL;
1173 newnp->ipv6_fl_list = NULL;
1176 newnp->rxopt.all = np->rxopt.all;
1178 newnp->pktoptions = NULL;
1180 newnp->mcast_oif = tcp_v6_iif(skb);
1181 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1182 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1184 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1186 /* Clone native IPv6 options from the listening socket (if any).
1188 Yes, keeping a reference count would be much more clever,
1189 but we do one more thing here: reattach optmem
1192 opt = ireq->ipv6_opt;
1194 opt = rcu_dereference(np->opt);
1196 opt = ipv6_dup_options(newsk, opt);
1197 RCU_INIT_POINTER(newnp->opt, opt);
1199 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1201 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1204 tcp_ca_openreq_child(newsk, dst);
1206 tcp_sync_mss(newsk, dst_mtu(dst));
1207 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1209 tcp_initialize_rcv_mss(newsk);
1211 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1212 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
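/* The two assignments above record LOOPBACK4_IPV6 (the 127.0.0.6 dummy
 * value) because an IPv6-only connection has no meaningful IPv4 addresses
 * to store in the inet fields.
 */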
1214 #ifdef CONFIG_TCP_MD5SIG
1215 /* Copy over the MD5 key from the original socket */
1216 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1218 /* We're using one, so create a matching key
1219 * on the newsk structure. If we fail to get
1220 * memory, then we end up not copying the key
1223 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1224 AF_INET6, 128, key->key, key->keylen,
1225 sk_gfp_mask(sk, GFP_ATOMIC));
1229 if (__inet_inherit_port(sk, newsk) < 0) {
1230 inet_csk_prepare_forced_close(newsk);
1234 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1236 tcp_move_syn(newtp, req);
1238 /* Clone pktoptions received with SYN, if we own the req */
1239 if (ireq->pktopts) {
1240 newnp->pktoptions = skb_clone(ireq->pktopts,
1241 sk_gfp_mask(sk, GFP_ATOMIC));
1242 consume_skb(ireq->pktopts);
1243 ireq->pktopts = NULL;
1244 if (newnp->pktoptions) {
1245 tcp_v6_restore_cb(newnp->pktoptions);
1246 skb_set_owner_r(newnp->pktoptions, newsk);
1254 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1262 /* The socket must have its spinlock held when we get
1263 * here, unless it is a TCP_LISTEN socket.
1265 * We have a potential double-lock case here, so even when
1266 * doing backlog processing we use the BH locking scheme.
1267 * This is because we cannot sleep with the original spinlock held.
1270 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1272 struct ipv6_pinfo *np = inet6_sk(sk);
1273 struct tcp_sock *tp;
1274 struct sk_buff *opt_skb = NULL;
1276 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1277 goes to the IPv4 receive handler, and is backlogged.
1278 From the backlog it always goes here. Kerboom...
1279 Fortunately, tcp_rcv_established and rcv_established
1280 handle them correctly, but it is not the case with
1281 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1284 if (skb->protocol == htons(ETH_P_IP))
1285 return tcp_v4_do_rcv(sk, skb);
1288 * socket locking is here for SMP purposes as backlog rcv
1289 * is currently called with bh processing disabled.
1292 /* Do Stevens' IPV6_PKTOPTIONS.
1294 Yes, guys, this is the only place in our code where we
1295 can implement it without affecting IPv4.
1296 The rest of the code is protocol independent,
1297 and I do not like the idea of uglifying IPv4.
1299 Actually, the whole idea behind IPV6_PKTOPTIONS
1300 does not look very well thought out. For now we latch the
1301 options received in the last packet enqueued
1302 by TCP. Feel free to propose a better solution.
1306 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1308 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1309 struct dst_entry *dst = sk->sk_rx_dst;
1311 sock_rps_save_rxhash(sk, skb);
1312 sk_mark_napi_id(sk, skb);
1314 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1315 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1317 sk->sk_rx_dst = NULL;
1321 tcp_rcv_established(sk, skb, tcp_hdr(skb));
1323 goto ipv6_pktoptions;
1327 if (tcp_checksum_complete(skb))
1330 if (sk->sk_state == TCP_LISTEN) {
1331 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1337 if (tcp_child_process(sk, nsk, skb))
1340 __kfree_skb(opt_skb);
1344 sock_rps_save_rxhash(sk, skb);
1346 if (tcp_rcv_state_process(sk, skb))
1349 goto ipv6_pktoptions;
1353 tcp_v6_send_reset(sk, skb);
1356 __kfree_skb(opt_skb);
1360 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1361 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1366 /* You may ask, what is this?
1368 1. The skb was enqueued by TCP.
1369 2. The skb was added to the tail of the read queue, rather than out of order.
1370 3. The socket is not in a passive state.
1371 4. Finally, it really contains options which the user wants to receive.
1374 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1375 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1376 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1377 np->mcast_oif = tcp_v6_iif(opt_skb);
1378 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1379 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1380 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1381 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1383 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1384 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1385 skb_set_owner_r(opt_skb, sk);
1386 tcp_v6_restore_cb(opt_skb);
1387 opt_skb = xchg(&np->pktoptions, opt_skb);
1389 __kfree_skb(opt_skb);
1390 opt_skb = xchg(&np->pktoptions, NULL);
1398 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1399 const struct tcphdr *th)
1401 /* This is tricky: we move IP6CB to its correct location inside
1402 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1403 * _decode_session6() uses IP6CB().
1404 * barrier() makes sure the compiler won't play aliasing games.
1406 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1407 sizeof(struct inet6_skb_parm));
1410 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
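/* end_seq below covers the payload plus one unit of sequence space for each
 * of SYN and FIN, since both flags consume a sequence number.
 */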
1411 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1412 skb->len - th->doff*4);
1413 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1414 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1415 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1416 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1417 TCP_SKB_CB(skb)->sacked = 0;
1418 TCP_SKB_CB(skb)->has_rxtstamp =
1419 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1422 static int tcp_v6_rcv(struct sk_buff *skb)
1424 int sdif = inet6_sdif(skb);
1425 const struct tcphdr *th;
1426 const struct ipv6hdr *hdr;
1430 struct net *net = dev_net(skb->dev);
1432 if (skb->pkt_type != PACKET_HOST)
1436 * Count it even if it's bad.
1438 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1440 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1443 th = (const struct tcphdr *)skb->data;
1445 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1447 if (!pskb_may_pull(skb, th->doff*4))
1450 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1453 th = (const struct tcphdr *)skb->data;
1454 hdr = ipv6_hdr(skb);
1457 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1458 th->source, th->dest, inet6_iif(skb), sdif,
1464 if (sk->sk_state == TCP_TIME_WAIT)
1467 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1468 struct request_sock *req = inet_reqsk(sk);
1469 bool req_stolen = false;
1472 sk = req->rsk_listener;
1473 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1474 sk_drops_add(sk, skb);
1478 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1479 inet_csk_reqsk_queue_drop_and_put(sk, req);
1485 if (!tcp_filter(sk, skb)) {
1486 th = (const struct tcphdr *)skb->data;
1487 hdr = ipv6_hdr(skb);
1488 tcp_v6_fill_cb(skb, hdr, th);
1489 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1494 /* Another cpu got exclusive access to req
1495 * and created a full blown socket.
1496 * Try to feed this packet to this socket
1497 * instead of discarding it.
1499 tcp_v6_restore_cb(skb);
1503 goto discard_and_relse;
1507 tcp_v6_restore_cb(skb);
1508 } else if (tcp_child_process(sk, nsk, skb)) {
1509 tcp_v6_send_reset(nsk, skb);
1510 goto discard_and_relse;
1516 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1517 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1518 goto discard_and_relse;
1521 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1522 goto discard_and_relse;
1524 if (tcp_v6_inbound_md5_hash(sk, skb))
1525 goto discard_and_relse;
1527 if (tcp_filter(sk, skb))
1528 goto discard_and_relse;
1529 th = (const struct tcphdr *)skb->data;
1530 hdr = ipv6_hdr(skb);
1531 tcp_v6_fill_cb(skb, hdr, th);
1535 if (sk->sk_state == TCP_LISTEN) {
1536 ret = tcp_v6_do_rcv(sk, skb);
1537 goto put_and_return;
1540 sk_incoming_cpu_update(sk);
1542 bh_lock_sock_nested(sk);
1543 tcp_segs_in(tcp_sk(sk), skb);
1545 if (!sock_owned_by_user(sk)) {
1546 ret = tcp_v6_do_rcv(sk, skb);
1547 } else if (tcp_add_backlog(sk, skb)) {
1548 goto discard_and_relse;
1555 return ret ? -1 : 0;
1558 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1561 tcp_v6_fill_cb(skb, hdr, th);
1563 if (tcp_checksum_complete(skb)) {
1565 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1567 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1569 tcp_v6_send_reset(NULL, skb);
1577 sk_drops_add(sk, skb);
1583 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1584 inet_twsk_put(inet_twsk(sk));
1588 tcp_v6_fill_cb(skb, hdr, th);
1590 if (tcp_checksum_complete(skb)) {
1591 inet_twsk_put(inet_twsk(sk));
1595 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1600 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1601 skb, __tcp_hdrlen(th),
1602 &ipv6_hdr(skb)->saddr, th->source,
1603 &ipv6_hdr(skb)->daddr,
1604 ntohs(th->dest), tcp_v6_iif(skb),
1607 struct inet_timewait_sock *tw = inet_twsk(sk);
1608 inet_twsk_deschedule_put(tw);
1610 tcp_v6_restore_cb(skb);
1618 tcp_v6_timewait_ack(sk, skb);
1621 tcp_v6_send_reset(sk, skb);
1622 inet_twsk_deschedule_put(inet_twsk(sk));
1624 case TCP_TW_SUCCESS:
1630 static void tcp_v6_early_demux(struct sk_buff *skb)
1632 const struct ipv6hdr *hdr;
1633 const struct tcphdr *th;
1636 if (skb->pkt_type != PACKET_HOST)
1639 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1642 hdr = ipv6_hdr(skb);
1645 if (th->doff < sizeof(struct tcphdr) / 4)
1648 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1649 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1650 &hdr->saddr, th->source,
1651 &hdr->daddr, ntohs(th->dest),
1652 inet6_iif(skb), inet6_sdif(skb));
1655 skb->destructor = sock_edemux;
1656 if (sk_fullsock(sk)) {
1657 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1660 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1662 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1663 skb_dst_set_noref(skb, dst);
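/* Caching the validated dst on the skb here lets the receive path for an
 * established socket skip a full route lookup for this packet.
 */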
1668 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1669 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1670 .twsk_unique = tcp_twsk_unique,
1671 .twsk_destructor = tcp_twsk_destructor,
1674 static const struct inet_connection_sock_af_ops ipv6_specific = {
1675 .queue_xmit = inet6_csk_xmit,
1676 .send_check = tcp_v6_send_check,
1677 .rebuild_header = inet6_sk_rebuild_header,
1678 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1679 .conn_request = tcp_v6_conn_request,
1680 .syn_recv_sock = tcp_v6_syn_recv_sock,
1681 .net_header_len = sizeof(struct ipv6hdr),
1682 .net_frag_header_len = sizeof(struct frag_hdr),
1683 .setsockopt = ipv6_setsockopt,
1684 .getsockopt = ipv6_getsockopt,
1685 .addr2sockaddr = inet6_csk_addr2sockaddr,
1686 .sockaddr_len = sizeof(struct sockaddr_in6),
1687 #ifdef CONFIG_COMPAT
1688 .compat_setsockopt = compat_ipv6_setsockopt,
1689 .compat_getsockopt = compat_ipv6_getsockopt,
1691 .mtu_reduced = tcp_v6_mtu_reduced,
1694 #ifdef CONFIG_TCP_MD5SIG
1695 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1696 .md5_lookup = tcp_v6_md5_lookup,
1697 .calc_md5_hash = tcp_v6_md5_hash_skb,
1698 .md5_parse = tcp_v6_parse_md5_keys,
1703 * TCP over IPv4 via INET6 API
1705 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1706 .queue_xmit = ip_queue_xmit,
1707 .send_check = tcp_v4_send_check,
1708 .rebuild_header = inet_sk_rebuild_header,
1709 .sk_rx_dst_set = inet_sk_rx_dst_set,
1710 .conn_request = tcp_v6_conn_request,
1711 .syn_recv_sock = tcp_v6_syn_recv_sock,
1712 .net_header_len = sizeof(struct iphdr),
1713 .setsockopt = ipv6_setsockopt,
1714 .getsockopt = ipv6_getsockopt,
1715 .addr2sockaddr = inet6_csk_addr2sockaddr,
1716 .sockaddr_len = sizeof(struct sockaddr_in6),
1717 #ifdef CONFIG_COMPAT
1718 .compat_setsockopt = compat_ipv6_setsockopt,
1719 .compat_getsockopt = compat_ipv6_getsockopt,
1721 .mtu_reduced = tcp_v4_mtu_reduced,
1724 #ifdef CONFIG_TCP_MD5SIG
1725 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1726 .md5_lookup = tcp_v4_md5_lookup,
1727 .calc_md5_hash = tcp_v4_md5_hash_skb,
1728 .md5_parse = tcp_v6_parse_md5_keys,
1732 /* NOTE: A lot of things are set to zero explicitly by the call to
1733 * sk_alloc(), so they need not be done here.
1735 static int tcp_v6_init_sock(struct sock *sk)
1737 struct inet_connection_sock *icsk = inet_csk(sk);
1741 icsk->icsk_af_ops = &ipv6_specific;
1743 #ifdef CONFIG_TCP_MD5SIG
1744 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1750 static void tcp_v6_destroy_sock(struct sock *sk)
1752 tcp_v4_destroy_sock(sk);
1753 inet6_destroy_sock(sk);
1756 #ifdef CONFIG_PROC_FS
1757 /* Proc filesystem TCPv6 sock list dumping. */
1758 static void get_openreq6(struct seq_file *seq,
1759 const struct request_sock *req, int i)
1761 long ttd = req->rsk_timer.expires - jiffies;
1762 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1763 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1769 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1770 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1772 src->s6_addr32[0], src->s6_addr32[1],
1773 src->s6_addr32[2], src->s6_addr32[3],
1774 inet_rsk(req)->ir_num,
1775 dest->s6_addr32[0], dest->s6_addr32[1],
1776 dest->s6_addr32[2], dest->s6_addr32[3],
1777 ntohs(inet_rsk(req)->ir_rmt_port),
1779 0, 0, /* could print option size, but that is af dependent. */
1780 1, /* timers active (only the expire timer) */
1781 jiffies_to_clock_t(ttd),
1783 from_kuid_munged(seq_user_ns(seq),
1784 sock_i_uid(req->rsk_listener)),
1785 0, /* non standard timer */
1786 0, /* open_requests have no inode */
1790 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1792 const struct in6_addr *dest, *src;
1795 unsigned long timer_expires;
1796 const struct inet_sock *inet = inet_sk(sp);
1797 const struct tcp_sock *tp = tcp_sk(sp);
1798 const struct inet_connection_sock *icsk = inet_csk(sp);
1799 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1803 dest = &sp->sk_v6_daddr;
1804 src = &sp->sk_v6_rcv_saddr;
1805 destp = ntohs(inet->inet_dport);
1806 srcp = ntohs(inet->inet_sport);
1808 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1809 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1810 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1812 timer_expires = icsk->icsk_timeout;
1813 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1815 timer_expires = icsk->icsk_timeout;
1816 } else if (timer_pending(&sp->sk_timer)) {
1818 timer_expires = sp->sk_timer.expires;
1821 timer_expires = jiffies;
1824 state = inet_sk_state_load(sp);
1825 if (state == TCP_LISTEN)
1826 rx_queue = sp->sk_ack_backlog;
1828 /* Because we don't lock the socket,
1829 * we might find a transient negative value.
1831 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1834 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1835 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1837 src->s6_addr32[0], src->s6_addr32[1],
1838 src->s6_addr32[2], src->s6_addr32[3], srcp,
1839 dest->s6_addr32[0], dest->s6_addr32[1],
1840 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1842 tp->write_seq - tp->snd_una,
1845 jiffies_delta_to_clock_t(timer_expires - jiffies),
1846 icsk->icsk_retransmits,
1847 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1848 icsk->icsk_probes_out,
1850 refcount_read(&sp->sk_refcnt), sp,
1851 jiffies_to_clock_t(icsk->icsk_rto),
1852 jiffies_to_clock_t(icsk->icsk_ack.ato),
1853 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1855 state == TCP_LISTEN ?
1856 fastopenq->max_qlen :
1857 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1861 static void get_timewait6_sock(struct seq_file *seq,
1862 struct inet_timewait_sock *tw, int i)
1864 long delta = tw->tw_timer.expires - jiffies;
1865 const struct in6_addr *dest, *src;
1868 dest = &tw->tw_v6_daddr;
1869 src = &tw->tw_v6_rcv_saddr;
1870 destp = ntohs(tw->tw_dport);
1871 srcp = ntohs(tw->tw_sport);
1874 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1875 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1877 src->s6_addr32[0], src->s6_addr32[1],
1878 src->s6_addr32[2], src->s6_addr32[3], srcp,
1879 dest->s6_addr32[0], dest->s6_addr32[1],
1880 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1881 tw->tw_substate, 0, 0,
1882 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1883 refcount_read(&tw->tw_refcnt), tw);
1886 static int tcp6_seq_show(struct seq_file *seq, void *v)
1888 struct tcp_iter_state *st;
1889 struct sock *sk = v;
1891 if (v == SEQ_START_TOKEN) {
1896 "st tx_queue rx_queue tr tm->when retrnsmt"
1897 " uid timeout inode\n");
1902 if (sk->sk_state == TCP_TIME_WAIT)
1903 get_timewait6_sock(seq, v, st->num);
1904 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1905 get_openreq6(seq, v, st->num);
1907 get_tcp6_sock(seq, v, st->num);
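/*
 * Illustrative /proc/net/tcp6 line (values are examples only) matching the
 * format printed by get_tcp6_sock() and the header above: each address is
 * the four 32-bit words of the IPv6 address in hex, ports and state are hex,
 * and a real entry is a single line:
 *
 *	0: 00000000000000000000000001000000:0016 00000000000000000000000000000000:0000
 *	   0A 00000000:00000000 00:00000000 00000000  1000 0 12345 1 0000000000000000
 *	   100 0 0 10 0
 */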
1912 static const struct file_operations tcp6_afinfo_seq_fops = {
1913 .open = tcp_seq_open,
1915 .llseek = seq_lseek,
1916 .release = seq_release_net
1919 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1922 .seq_fops = &tcp6_afinfo_seq_fops,
1924 .show = tcp6_seq_show,
1928 int __net_init tcp6_proc_init(struct net *net)
1930 return tcp_proc_register(net, &tcp6_seq_afinfo);
1933 void tcp6_proc_exit(struct net *net)
1935 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1939 struct proto tcpv6_prot = {
1941 .owner = THIS_MODULE,
1943 .pre_connect = tcp_v6_pre_connect,
1944 .connect = tcp_v6_connect,
1945 .disconnect = tcp_disconnect,
1946 .accept = inet_csk_accept,
1948 .init = tcp_v6_init_sock,
1949 .destroy = tcp_v6_destroy_sock,
1950 .shutdown = tcp_shutdown,
1951 .setsockopt = tcp_setsockopt,
1952 .getsockopt = tcp_getsockopt,
1953 .keepalive = tcp_set_keepalive,
1954 .recvmsg = tcp_recvmsg,
1955 .sendmsg = tcp_sendmsg,
1956 .sendpage = tcp_sendpage,
1957 .backlog_rcv = tcp_v6_do_rcv,
1958 .release_cb = tcp_release_cb,
1960 .unhash = inet_unhash,
1961 .get_port = inet_csk_get_port,
1962 .enter_memory_pressure = tcp_enter_memory_pressure,
1963 .leave_memory_pressure = tcp_leave_memory_pressure,
1964 .stream_memory_free = tcp_stream_memory_free,
1965 .sockets_allocated = &tcp_sockets_allocated,
1966 .memory_allocated = &tcp_memory_allocated,
1967 .memory_pressure = &tcp_memory_pressure,
1968 .orphan_count = &tcp_orphan_count,
1969 .sysctl_mem = sysctl_tcp_mem,
1970 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1971 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
1972 .max_header = MAX_TCP_HEADER,
1973 .obj_size = sizeof(struct tcp6_sock),
1974 .slab_flags = SLAB_TYPESAFE_BY_RCU,
1975 .twsk_prot = &tcp6_timewait_sock_ops,
1976 .rsk_prot = &tcp6_request_sock_ops,
1977 .h.hashinfo = &tcp_hashinfo,
1978 .no_autobind = true,
1979 #ifdef CONFIG_COMPAT
1980 .compat_setsockopt = compat_tcp_setsockopt,
1981 .compat_getsockopt = compat_tcp_getsockopt,
1983 .diag_destroy = tcp_abort,
1986 /* thinking of making this const? Don't.
1987 * early_demux can change based on sysctl.
1989 static struct inet6_protocol tcpv6_protocol = {
1990 .early_demux = tcp_v6_early_demux,
1991 .early_demux_handler = tcp_v6_early_demux,
1992 .handler = tcp_v6_rcv,
1993 .err_handler = tcp_v6_err,
1994 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1997 static struct inet_protosw tcpv6_protosw = {
1998 .type = SOCK_STREAM,
1999 .protocol = IPPROTO_TCP,
2000 .prot = &tcpv6_prot,
2001 .ops = &inet6_stream_ops,
2002 .flags = INET_PROTOSW_PERMANENT |
2006 static int __net_init tcpv6_net_init(struct net *net)
2008 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2009 SOCK_RAW, IPPROTO_TCP, net);
2012 static void __net_exit tcpv6_net_exit(struct net *net)
2014 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2017 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2019 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2022 static struct pernet_operations tcpv6_net_ops = {
2023 .init = tcpv6_net_init,
2024 .exit = tcpv6_net_exit,
2025 .exit_batch = tcpv6_net_exit_batch,
2028 int __init tcpv6_init(void)
2032 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2036 /* register inet6 protocol */
2037 ret = inet6_register_protosw(&tcpv6_protosw);
2039 goto out_tcpv6_protocol;
2041 ret = register_pernet_subsys(&tcpv6_net_ops);
2043 goto out_tcpv6_protosw;
2048 inet6_unregister_protosw(&tcpv6_protosw);
2050 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2054 void tcpv6_exit(void)
2056 unregister_pernet_subsys(&tcpv6_net_ops);
2057 inet6_unregister_protosw(&tcpv6_protosw);
2058 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);