/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}

bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif

static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};
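
/* A note on the hashing scheme above: the MFC hash is keyed on the
 * (origin, mcastgrp) address pair embedded in struct mfc6_cache.  Per the
 * rhashtable obj_cmpfn convention, ip6mr_hash_cmp() must return 0 on a
 * match and non-zero otherwise, which is why it ORs the two negated
 * address comparisons.
 */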
static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};

static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}

static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 */
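
/* Illustrative /proc/net/ip6_mr_vif output (the column layout follows the
 * seq_printf() format strings below; the numbers are made up):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           8322      63         0       0 00000
 *	 1 pim6reg           0       0         0       0 00001
 */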
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct mr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
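
/* An illustrative resolved /proc/net/ip6_mr_cache line matching the
 * formats above (made-up values): group, origin, incoming mif, then
 * packet/byte/wrong-mif counters, then "oif:TTL" pairs per output mif:
 *
 *	ff3e::4321 2001:db8::1 1        63     8322        0  2:1    3:1
 */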
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct mr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2
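
/* Summary of the receive path below: pim6_rcv() handles PIM Register
 * messages arriving on IPPROTO_PIM.  It validates the Register header and
 * checksum, checks that the encapsulated packet really is IPv6 multicast,
 * then strips the outer headers and re-injects the inner packet on the
 * table's "pim6reg" register device via skb_tunnel_rx().
 */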
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}
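
/* These two wrappers feed VIF and MFC changes into the generic FIB
 * notifier chain (family RTNL_FAMILY_IP6MR) and bump the per-netns
 * sequence counter, so that listeners (e.g. hardware offload drivers)
 * can detect that they raced with a change and need to re-dump.
 */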
/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}

/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
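
/* Worked example (made-up numbers): with mrt->maxvif == 4 and
 * ttls = { 0, 2, 255, 3 }, only vifs 1 and 3 get a real threshold, so
 * minvif becomes 1 and maxvif becomes 4.  A packet is later forwarded on
 * vif N only if its hop limit exceeds ttls[N] (see ip6_mr_forward()).
 */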
static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */
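
/* The report handed to the daemon is a struct mrt6msg prepended to the
 * copied header or the whole packet (layout paraphrased here from
 * <linux/mroute6.h> for orientation):
 *
 *	struct mrt6msg {
 *		__u8            im6_mbz;        // must be zero
 *		__u8            im6_msgtype;    // NOCACHE, WRONGMIF or WHOLEPKT
 *		__u16           im6_mif;        // mif the packet arrived on
 *		__u32           im6_pad;
 *		struct in6_addr im6_src, im6_dst;
 *	};
 */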
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+ sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution. It gets a locked cache entry! */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
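
/* Note the two hard bounds enforced above: a table keeps at most ten
 * unresolved entries at a time, and each entry buffers at most four
 * packets (the append is refused once qlen exceeds 3).  Anything beyond
 * that is dropped with -ENOBUFS until pim6sd installs a route.
 */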
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}

static int ip6mr_dump(struct net *net, struct notifier_block *nb)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock);
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.fib_seq_read	= ip6mr_seq_read,
	.fib_dump	= ip6mr_dump,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}

static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}

/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
	ip6mr_notifier_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc6_cache *)_uc;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
				       c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct mr_mfc *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
		list_del_rcu(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		mr_cache_put(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
						       FIB_EVENT_ENTRY_DEL,
						       (struct mfc6_cache *)c,
						       mrt->id);
			mr6_netlink_event(mrt, (struct mfc6_cache *)c,
					  RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (rtnl_dereference(mrt->mroute_sk)) {
		err = -EADDRINUSE;
	} else {
		rcu_assign_pointer(mrt->mroute_sk, sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		net->ipv6.devconf_all->mc_forwarding++;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return false;

	return rcu_access_pointer(mrt->mroute_sk);
}
EXPORT_SYMBOL(mroute6_is_socket);

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
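
/* A minimal pim6sd-style user-space sketch (illustrative only; error
 * handling omitted, interface name made up).  The option level is
 * IPPROTO_IPV6, and the socket must be the raw ICMPv6 socket checked
 * at the top of ip6_mroute_setsockopt():
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mc = {
 *		.mif6c_mifi = 0,
 *		.mif6c_pifi = if_nametoindex("eth0"),
 *	};
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */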
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk ==
					    rtnl_dereference(mrt->mroute_sk),
					    parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == rcu_access_pointer(mrt->mroute_sk))
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
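
/* Illustrative user-space counterpart of SIOCGETSGCNT_IN6 (hypothetical
 * addresses; "s" is the daemon's raw ICMPv6 socket):
 *
 *	struct sioc_sg_req6 sr = { 0 };
 *	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::4321", &sr.grp.sin6_addr);
 *	if (ioctl(s, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("%lu pkts, %lu bytes, %lu wrong-if\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */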
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on.
	 * If we don't do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join more than one interface - it would
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		       so we cannot check that the packet arrived on an oif.
		       That is bad, but otherwise we would need to move a
		       pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2,
						       c, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, c, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}

/*
 *	Multicast packets for forwarding arrive here
 */
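
/* For orientation (an assumption stated here, not part of this file):
 * the caller is the IPv6 input path, which hands multicast packets to
 * ip6_mr_input() once multicast forwarding has been enabled; see
 * ip6_mc_input() in net/ipv6/ip6_input.c.
 */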
int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}

int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}

static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}

static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}

static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}

static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}

static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock);
}