2 * IP multicast routing support for mrouted 3.6/3.8
4 * (c) 1995 Alan Cox, <alan@redhat.com>
5 * Linux Consultancy and Custom Driver Development
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
12 * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
15 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code
17 * Alan Cox : Fixed the clone/copy bug and device race.
18 * Mike McLagan : Routing by source
19 * Malcolm Beattie : Buffer handling fixes.
20 * Alexey Kuznetsov : Double buffer free and other fixes.
21 * SVR Anand : Fixed several multicast bugs and problems.
22 * Alexey Kuznetsov : Status, optimisations and more.
23 * Brad Parker : Better behaviour on mrouted upcall
25 * Carlos Picoto : PIMv1 Support
26 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
27 * Relax this requirement to work with older peers.
31 #include <linux/config.h>
32 #include <asm/system.h>
33 #include <asm/uaccess.h>
34 #include <linux/types.h>
35 #include <linux/sched.h>
36 #include <linux/errno.h>
37 #include <linux/timer.h>
39 #include <linux/kernel.h>
40 #include <linux/fcntl.h>
41 #include <linux/stat.h>
42 #include <linux/socket.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/inetdevice.h>
47 #include <linux/igmp.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/mroute.h>
51 #include <linux/init.h>
53 #include <net/protocol.h>
54 #include <linux/skbuff.h>
59 #include <linux/notifier.h>
60 #include <linux/if_arp.h>
61 #include <linux/netfilter_ipv4.h>
63 #include <net/checksum.h>
/*
 * Global multicast-routing state.
 *
 * NOTE(review): this extract is lossy -- the embedded line numbers are
 * discontinuous, so declarations (e.g. maxvif, MFC_LINES) and statements
 * are missing throughout the file.  Verify everything against the
 * complete source.
 */
65 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
66 #define CONFIG_IP_PIMSM 1
/* The one-and-only mrouted/pimd control socket; NULL when no daemon runs. */
69 static struct sock *mroute_socket;
72 /* Big lock, protecting vif table, mrt cache and mroute socket state.
73 Note that the changes are semaphored via rtnl_lock.
76 static rwlock_t mrt_lock = RW_LOCK_UNLOCKED;
79 * Multicast router control variables
82 static struct vif_device vif_table[MAXVIFS]; /* Devices */
85 #define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
87 static int mroute_do_assert; /* Set in PIM assert */
88 static int mroute_do_pim;
90 static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
92 static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
93 static atomic_t cache_resolve_queue_len; /* Size of unresolved */
95 /* Special spinlock for queue of unresolved entries */
96 static spinlock_t mfc_unres_lock = SPIN_LOCK_UNLOCKED;
98 /* We return to original Alan's scheme. Hash table of resolved
99 entries is changed only in process context and protected
100 with weak lock mrt_lock. Queue of unresolved entries is protected
101 with strong spinlock mfc_unres_lock.
103 In this case data path is free of exclusive locks at all.
/* Slab cache backing struct mfc_cache allocations. */
106 static kmem_cache_t *mrt_cachep;
/* Forward declarations for routines defined later in this file. */
108 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
109 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
110 static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
112 #ifdef CONFIG_IP_PIMSM_V2
113 static struct net_protocol pim_protocol;
/* Timer driving ipmr_expire_process over the unresolved-entry queue. */
116 static struct timer_list ipmr_expire_timer;
118 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
/*
 * ipmr_new_tunnel - create a "dvmrp%d" IPIP tunnel device for a VIFF_TUNNEL vif.
 * Issues SIOCADDTUNNEL against the master "tunl0" device with a kernel-space
 * ioctl (get_fs/set_fs dance), then looks the new device up by name.
 * NOTE(review): several lines are missing from this extract (ifr/err/oldfs
 * declarations, error paths, closing braces) -- verify against full source.
 */
121 struct net_device *ipmr_new_tunnel(struct vifctl *v)
123 struct net_device *dev;
125 dev = __dev_get_by_name("tunl0");
131 struct ip_tunnel_parm p;
132 struct in_device *in_dev;
/* Build tunnel parameters from the vif's local/remote addresses. */
134 memset(&p, 0, sizeof(p));
135 p.iph.daddr = v->vifc_rmt_addr.s_addr;
136 p.iph.saddr = v->vifc_lcl_addr.s_addr;
139 p.iph.protocol = IPPROTO_IPIP;
140 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
141 ifr.ifr_ifru.ifru_data = (void*)&p;
/* Temporarily allow the ioctl to take a kernel-space pointer. */
143 oldfs = get_fs(); set_fs(KERNEL_DS);
144 err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
149 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
150 dev->flags |= IFF_MULTICAST;
/* Ensure the new device has IPv4 config and disable rp_filter on it. */
152 in_dev = __in_dev_get(dev);
153 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
155 in_dev->cnf.rp_filter = 0;
/* Failure path: tear the half-created device back down. */
164 /* allow the register to be completed before unregistering. */
168 unregister_netdevice(dev);
172 #ifdef CONFIG_IP_PIMSM
/* Index of the PIM register vif in vif_table, or -1 when none exists. */
174 static int reg_vif_num = -1;
/*
 * reg_vif_xmit - "transmit" on the pimreg device: instead of sending,
 * bounce the whole packet up to the PIM daemon as an IGMPMSG_WHOLEPKT
 * report.  Stats are counted under mrt_lock (read side).
 * NOTE(review): the kfree_skb/return tail is missing from this extract.
 */
176 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
178 read_lock(&mrt_lock);
179 ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len;
180 ((struct net_device_stats*)dev->priv)->tx_packets++;
181 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
182 read_unlock(&mrt_lock);
/* Stats live in dev->priv (allocated alongside the netdev below). */
187 static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
189 return (struct net_device_stats*)dev->priv;
/* Initialise the pimreg pseudo-device: no ARP, PIMREG type, shrunken MTU. */
192 static void reg_vif_setup(struct net_device *dev)
194 dev->type = ARPHRD_PIMREG;
/* Room for the outer IP header plus the 8-byte PIM register header. */
195 dev->mtu = 1500 - sizeof(struct iphdr) - 8;
196 dev->flags = IFF_NOARP;
197 dev->hard_start_xmit = reg_vif_xmit;
198 dev->get_stats = reg_vif_get_stats;
199 dev->destructor = free_netdev;
/*
 * ipmr_reg_vif - allocate and register the "pimreg" device used as the
 * PIM register vif.  Returns the device or (presumably) NULL on failure.
 * NOTE(review): error-handling lines are missing from this extract.
 */
202 static struct net_device *ipmr_reg_vif(void)
204 struct net_device *dev;
205 struct in_device *in_dev;
207 dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg",
213 if (register_netdevice(dev)) {
219 if ((in_dev = inetdev_init(dev)) == NULL)
/* rp_filter would discard the decapsulated register payloads. */
222 in_dev->cnf.rp_filter = 0;
230 /* allow the register to be completed before unregistering. */
234 unregister_netdevice(dev);
/*
 * vif_delete - tear down vif_table[vifi]: detach it under mrt_lock (BH),
 * drop allmulti/mc_forwarding on the underlying device, and unregister
 * tunnel/register pseudo-devices.  Returns 0 or -EADDRNOTAVAIL.
 * NOTE(review): lines are missing (dev = v->dev capture, maxvif shrink
 * body, reg_vif_num reset, final return) -- verify against full source.
 */
243 static int vif_delete(int vifi)
245 struct vif_device *v;
246 struct net_device *dev;
247 struct in_device *in_dev;
249 if (vifi < 0 || vifi >= maxvif)
250 return -EADDRNOTAVAIL;
252 v = &vif_table[vifi];
254 write_lock_bh(&mrt_lock);
/* Presumably: vif already empty -> bail out. */
259 write_unlock_bh(&mrt_lock);
260 return -EADDRNOTAVAIL;
263 #ifdef CONFIG_IP_PIMSM
/* Deleting the register vif: reg_vif_num must be invalidated (body missing). */
264 if (vifi == reg_vif_num)
/* Shrink maxvif when the highest-numbered vif goes away. */
268 if (vifi+1 == maxvif) {
270 for (tmp=vifi-1; tmp>=0; tmp--) {
277 write_unlock_bh(&mrt_lock);
/* Undo the allmulti/mc_forwarding accounting done by vif_add(). */
279 dev_set_allmulti(dev, -1);
281 if ((in_dev = __in_dev_get(dev)) != NULL) {
282 in_dev->cnf.mc_forwarding--;
283 ip_rt_multicast_event(in_dev);
/* Tunnel and register vifs own their device; destroy it with the vif. */
286 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
287 unregister_netdevice(dev);
293 /* Destroy an unresolved cache entry, killing queued skbs
294 and reporting error to netlink readers.
/*
 * Queued skbs with iph->version == 0 are synthetic netlink route-request
 * placeholders (built by ipmr_get_route); they get an NLMSG_ERROR reply
 * with -ETIMEDOUT.  Real packets are simply freed (free path missing
 * from this extract).  Called with mfc_unres_lock conventions of the
 * caller; decrements the global unresolved count.
 */
297 static void ipmr_destroy_unres(struct mfc_cache *c)
301 atomic_dec(&cache_resolve_queue_len);
303 while((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
304 if (skb->nh.iph->version == 0) {
/* Strip the fake IP header to expose the buried netlink header. */
305 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
306 nlh->nlmsg_type = NLMSG_ERROR;
307 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
308 skb_trim(skb, nlh->nlmsg_len);
309 ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
310 netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
/* Finally release the cache entry itself. */
315 kmem_cache_free(mrt_cachep, c);
319 /* Single timer process for all the unresolved queue. */
/*
 * Walks mfc_unres_queue, destroys entries whose resolve window expired,
 * and re-arms the timer for the nearest remaining expiry.  Runs in timer
 * (softirq) context, so if the trylock fails it simply retries shortly
 * rather than spinning.
 * NOTE(review): 'now'/'expires' initialisation and list-unlink lines are
 * missing from this extract.
 */
321 static void ipmr_expire_process(unsigned long dummy)
324 unsigned long expires;
325 struct mfc_cache *c, **cp;
327 if (!spin_trylock(&mfc_unres_lock)) {
/* Contended: poke ourselves again in ~100ms instead of blocking. */
328 mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
332 if (atomic_read(&cache_resolve_queue_len) == 0)
337 cp = &mfc_unres_queue;
339 while ((c=*cp) != NULL) {
340 if (time_after(c->mfc_un.unres.expires, now)) {
/* Not yet expired: remember the soonest deadline for the re-arm below. */
341 unsigned long interval = c->mfc_un.unres.expires - now;
342 if (interval < expires)
/* Expired: entry was unlinked (line missing) and is torn down here. */
350 ipmr_destroy_unres(c);
353 if (atomic_read(&cache_resolve_queue_len))
354 mod_timer(&ipmr_expire_timer, jiffies + expires);
357 spin_unlock(&mfc_unres_lock);
360 /* Fill oifs list. It is called under write locked mrt_lock. */
/*
 * Copy the daemon-supplied per-vif TTL thresholds into the cache entry
 * and recompute the [minvif, maxvif) span of vifs with a usable
 * threshold.  A ttl of 0 or 255 means "do not forward on this vif".
 * (Function name is a historical misspelling of "thresholds"; kept for
 * consistency with its callers.)
 */
362 static void ipmr_update_threshoulds(struct mfc_cache *cache, unsigned char *ttls)
366 cache->mfc_un.res.minvif = MAXVIFS;
367 cache->mfc_un.res.maxvif = 0;
/* 255 == "never forward"; start from all-disabled. */
368 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
370 for (vifi=0; vifi<maxvif; vifi++) {
371 if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
372 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
373 if (cache->mfc_un.res.minvif > vifi)
374 cache->mfc_un.res.minvif = vifi;
375 if (cache->mfc_un.res.maxvif <= vifi)
/* maxvif is exclusive: one past the highest enabled vif. */
376 cache->mfc_un.res.maxvif = vifi + 1;
/*
 * vif_add - install vif_table[vifi] per a MRT_ADD_VIF request.
 * @mrtsock: non-zero when the request came from the live mroute socket;
 *           such vifs are dropped on daemon exit (no VIFF_STATIC).
 * Chooses/creates the backing device by vifc_flags (register vif, DVMRP
 * tunnel, or plain local-address lookup), enables multicast forwarding
 * on it, then publishes the vif under mrt_lock.
 * NOTE(review): many lines missing (case labels, -EADDRINUSE return,
 * v->dev assignment, maxvif update) -- verify against full source.
 */
381 static int vif_add(struct vifctl *vifc, int mrtsock)
383 int vifi = vifc->vifc_vifi;
384 struct vif_device *v = &vif_table[vifi];
385 struct net_device *dev;
386 struct in_device *in_dev;
/* Slot already taken (error return missing from extract). */
389 if (VIF_EXISTS(vifi))
392 switch (vifc->vifc_flags) {
393 #ifdef CONFIG_IP_PIMSM
396 * Special Purpose VIF in PIM
397 * All the packets will be sent to the daemon
/* Only one register vif may exist at a time. */
399 if (reg_vif_num >= 0)
401 dev = ipmr_reg_vif();
/* VIFF_TUNNEL case: build a DVMRP IPIP tunnel. */
407 dev = ipmr_new_tunnel(vifc);
/* Default case: find the device owning the requested local address. */
412 dev=ip_dev_find(vifc->vifc_lcl_addr.s_addr);
414 return -EADDRNOTAVAIL;
421 if ((in_dev = __in_dev_get(dev)) == NULL)
422 return -EADDRNOTAVAIL;
/* Account forwarding on the device and listen to all multicast. */
423 in_dev->cnf.mc_forwarding++;
424 dev_set_allmulti(dev, +1);
425 ip_rt_multicast_event(in_dev);
428 * Fill in the VIF structures
430 v->rate_limit=vifc->vifc_rate_limit;
431 v->local=vifc->vifc_lcl_addr.s_addr;
432 v->remote=vifc->vifc_rmt_addr.s_addr;
433 v->flags=vifc->vifc_flags;
/* Daemon-independent vifs survive mroute_clean_tables(). */
435 v->flags |= VIFF_STATIC;
436 v->threshold=vifc->vifc_threshold;
441 v->link = dev->ifindex;
/* For virtual devices record the underlying (carrier) ifindex instead. */
442 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
443 v->link = dev->iflink;
445 /* And finish update writing critical data */
446 write_lock_bh(&mrt_lock);
449 #ifdef CONFIG_IP_PIMSM
/* Record this slot as the register vif (assignment missing from extract). */
450 if (v->flags&VIFF_REGISTER)
455 write_unlock_bh(&mrt_lock);
459 #if defined(CONFIG_MIPS_BRCM)
/*
 * Broadcom variant: hashes on (group, 0, ifindex) and matches on the
 * multicast group only -- the 'origin' argument is accepted for
 * signature compatibility but not compared here.  Caller holds mrt_lock.
 * NOTE(review): declaration of 'c' and return lines missing from extract.
 */
460 static struct mfc_cache *ipmr_cache_find(__u32 origin, __u32 mcastgrp,unsigned int ifindex)
462 int line=MFC_HASH(mcastgrp,htonl(0x00000000),ifindex);
465 for (c=mfc_cache_array[line]; c; c = c->next) {
466 if ( c->mfc_mcastgrp==mcastgrp)
/* Stock variant: exact (S,G) lookup in the resolved hash. */
472 static struct mfc_cache *ipmr_cache_find(__u32 origin, __u32 mcastgrp)
474 int line=MFC_HASH(mcastgrp,origin);
477 for (c=mfc_cache_array[line]; c; c = c->next) {
478 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
486 * Allocate a multicast cache entry
/* Process-context allocation of a resolved entry; minvif starts "empty". */
488 static struct mfc_cache *ipmr_cache_alloc(void)
490 struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
493 memset(c, 0, sizeof(*c));
494 c->mfc_un.res.minvif = MAXVIFS;
/* Atomic-context allocation of an unresolved entry with a 10s resolve window. */
498 static struct mfc_cache *ipmr_cache_alloc_unres(void)
500 struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
503 memset(c, 0, sizeof(*c));
504 skb_queue_head_init(&c->mfc_un.unres.unresolved);
505 c->mfc_un.unres.expires = jiffies + 10*HZ;
510 * A cache entry has gone into a resolved state from queued
/*
 * Drain the unresolved entry's pending skbs through the now-resolved
 * route 'c'.  Synthetic netlink placeholders (iph->version == 0, queued
 * by ipmr_get_route) are answered via rtnl; real packets are forwarded.
 */
513 static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
518 * Play the pending entries through our router
521 while((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
522 if (skb->nh.iph->version == 0) {
/* Strip the fake IP header to reach the buried netlink request. */
524 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
526 if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
527 nlh->nlmsg_len = skb->tail - (u8*)nlh;
/* fill failed: answer with an EMSGSIZE netlink error instead. */
529 nlh->nlmsg_type = NLMSG_ERROR;
530 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
531 skb_trim(skb, nlh->nlmsg_len);
532 ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
534 err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
/* Ordinary queued data packet: forward it along the resolved route. */
536 ip_mr_forward(skb, c, 0);
541 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
542 * expects the following bizarre scheme.
544 * Called under mrt_lock.
/*
 * Builds an igmpmsg (disguised as an IGMP packet) describing 'pkt' and
 * queues it on the mroute control socket.  For IGMPMSG_WHOLEPKT (PIM
 * register path) the entire packet is wrapped behind a duplicated IP
 * header; otherwise only the IP header plus a fake IGMP header is sent.
 * NOTE(review): msg/ret declarations, null-skb check, and the
 * msg->im_mbz / error-path lines are missing from this extract.
 */
547 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
550 int ihl = pkt->nh.iph->ihl<<2;
551 struct igmphdr *igmp;
555 #ifdef CONFIG_IP_PIMSM
556 if (assert == IGMPMSG_WHOLEPKT)
/* Whole-packet report: copy with headroom for one extra IP header. */
557 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr))
560 skb = alloc_skb(128, GFP_ATOMIC);
565 #ifdef CONFIG_IP_PIMSM
566 if (assert == IGMPMSG_WHOLEPKT) {
567 /* Ugly, but we have no choice with this interface.
568 Duplicate old header, fix ihl, length etc.
569 And all this only to mangle msg->im_msgtype and
570 to set msg->im_mbz to "mbz" :-)
572 msg = (struct igmpmsg*)skb_push(skb, sizeof(struct iphdr));
573 skb->nh.raw = skb->h.raw = (u8*)msg;
574 memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
575 msg->im_msgtype = IGMPMSG_WHOLEPKT;
577 msg->im_vif = reg_vif_num;
578 skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
/* Outer length = inner length + the header we just prepended. */
579 skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
/* Normal (NOCACHE/WRONGVIF) report: copy just the IP header... */
588 skb->nh.iph = (struct iphdr *)skb_put(skb, ihl);
589 memcpy(skb->data,pkt->data,ihl);
590 skb->nh.iph->protocol = 0; /* Flag to the kernel this is a route add */
591 msg = (struct igmpmsg*)skb->nh.iph;
593 skb->dst = dst_clone(pkt->dst);
/* ...then append a fake IGMP header carrying the message type. */
599 igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
601 msg->im_msgtype = assert;
603 skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */
604 skb->h.raw = skb->nh.raw;
/* No daemon listening: report cannot be delivered. */
607 if (mroute_socket == NULL) {
615 if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
617 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
625 * Queue a packet for resolution. It gets locked cache entry!
/*
 * Park 'skb' on the unresolved-entry queue for its (S,G) until the
 * daemon installs a route.  Creates the entry (capped at 10 pending
 * entries), reports the first packet to mrouted as IGMPMSG_NOCACHE,
 * and limits each entry's backlog to ~4 skbs.
 * NOTE(review): declarations, kfree_skb calls and return statements are
 * missing from this extract.
 */
629 ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
634 spin_lock_bh(&mfc_unres_lock);
/* Already waiting for this (S,G)? */
635 for (c=mfc_unres_queue; c; c=c->next) {
636 if (c->mfc_mcastgrp == skb->nh.iph->daddr &&
637 c->mfc_origin == skb->nh.iph->saddr)
643 * Create a new entry if allowable
/* Hard cap of 10 concurrent unresolved entries, or allocation failure. */
646 if (atomic_read(&cache_resolve_queue_len)>=10 ||
647 (c=ipmr_cache_alloc_unres())==NULL) {
648 spin_unlock_bh(&mfc_unres_lock);
655 * Fill in the new cache entry
658 c->mfc_origin=skb->nh.iph->saddr;
659 c->mfc_mcastgrp=skb->nh.iph->daddr;
662 * Reflect first query at mrouted.
664 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
665 /* If the report failed throw the cache entry
668 spin_unlock_bh(&mfc_unres_lock);
670 kmem_cache_free(mrt_cachep, c);
/* Report delivered: link the entry in and arm the expiry timer. */
675 atomic_inc(&cache_resolve_queue_len);
676 c->next = mfc_unres_queue;
679 mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
683 * See if we can append the packet
/* Backlog bounded at 4 skbs per unresolved entry (drop path missing). */
685 if (c->mfc_un.unres.unresolved.qlen>3) {
689 skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
693 spin_unlock_bh(&mfc_unres_lock);
698 * MFC cache manipulation by user space mroute daemon
/*
 * ipmr_mfc_delete - remove an (S,G) entry installed by MRT_DEL_MFC.
 * Unlink under mrt_lock (BH) then free.  The Broadcom hash also folds
 * in the parent vif.
 * NOTE(review): 'line' declaration, unlink line and returns are missing.
 */
701 static int ipmr_mfc_delete(struct mfcctl *mfc)
704 struct mfc_cache *c, **cp;
706 #if defined(CONFIG_MIPS_BRCM)
707 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr,mfc->mfcc_parent);
709 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
712 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
713 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
714 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
715 write_lock_bh(&mrt_lock);
717 write_unlock_bh(&mrt_lock);
719 kmem_cache_free(mrt_cachep, c);
/*
 * ipmr_mfc_add - install or update an (S,G) route (MRT_ADD_MFC).
 * Updates an existing entry in place; otherwise allocates one, links it
 * into the resolved hash, and replays any matching unresolved entry's
 * queued packets via ipmr_cache_resolve().
 * @mrtsock: request came from the live mroute socket (else MFC_STATIC).
 */
726 static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
729 struct mfc_cache *uc, *c, **cp;
731 #if defined(CONFIG_MIPS_BRCM)
732 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr,mfc->mfcc_parent);
734 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
737 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
738 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
739 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
/* Found: update parent/thresholds under the write lock. */
744 write_lock_bh(&mrt_lock);
745 c->mfc_parent = mfc->mfcc_parent;
746 ipmr_update_threshoulds(c, mfc->mfcc_ttls);
748 c->mfc_flags |= MFC_STATIC;
749 write_unlock_bh(&mrt_lock);
/* New entries are only accepted for genuine multicast groups. */
753 if(!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
756 c=ipmr_cache_alloc();
760 c->mfc_origin=mfc->mfcc_origin.s_addr;
761 c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
762 c->mfc_parent=mfc->mfcc_parent;
763 ipmr_update_threshoulds(c, mfc->mfcc_ttls);
765 c->mfc_flags |= MFC_STATIC;
/* Publish at the head of the hash chain. */
767 write_lock_bh(&mrt_lock);
768 c->next = mfc_cache_array[line];
769 mfc_cache_array[line] = c;
770 write_unlock_bh(&mrt_lock);
773 * Check to see if we resolved a queued list. If so we
774 * need to send on the frames and tidy up.
776 spin_lock_bh(&mfc_unres_lock);
777 for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
779 if (uc->mfc_origin == c->mfc_origin &&
780 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
/* Last unresolved entry gone: the expiry timer is no longer needed. */
782 if (atomic_dec_and_test(&cache_resolve_queue_len))
783 del_timer(&ipmr_expire_timer);
787 spin_unlock_bh(&mfc_unres_lock);
/* Flush the queued packets through the new route, then free the stub. */
790 ipmr_cache_resolve(uc, c);
791 kmem_cache_free(mrt_cachep, uc);
797 * Close the multicast socket, and clear the vif tables etc
/*
 * mroute_clean_tables - drop all non-static vifs and MFC entries and
 * flush the unresolved queue.  Run when the control socket goes away.
 * NOTE(review): vif_delete call, unlink lines and 'c' capture in the
 * unresolved loop are missing from this extract.
 */
800 static void mroute_clean_tables(struct sock *sk)
805 * Shut down all active vif entries
807 for(i=0; i<maxvif; i++) {
/* Static vifs survive daemon exit; others are deleted (call missing). */
808 if (!(vif_table[i].flags&VIFF_STATIC))
/* Wipe the resolved forwarding cache, sparing MFC_STATIC routes. */
815 for (i=0;i<MFC_LINES;i++) {
816 struct mfc_cache *c, **cp;
818 cp = &mfc_cache_array[i];
819 while ((c = *cp) != NULL) {
820 if (c->mfc_flags&MFC_STATIC) {
824 write_lock_bh(&mrt_lock);
826 write_unlock_bh(&mrt_lock);
828 kmem_cache_free(mrt_cachep, c);
/* Drain unresolved entries, dropping the lock around each destroy. */
832 if (atomic_read(&cache_resolve_queue_len) != 0) {
835 spin_lock_bh(&mfc_unres_lock);
836 while (mfc_unres_queue != NULL) {
838 mfc_unres_queue = c->next;
839 spin_unlock_bh(&mfc_unres_lock);
841 ipmr_destroy_unres(c);
843 spin_lock_bh(&mfc_unres_lock);
845 spin_unlock_bh(&mfc_unres_lock);
/*
 * mrtsock_destruct - ip_ra_control destructor for the mroute socket:
 * undo the global mc_forwarding accounting, detach mroute_socket under
 * mrt_lock, and clean the tables.
 */
849 static void mrtsock_destruct(struct sock *sk)
852 if (sk == mroute_socket) {
853 ipv4_devconf.mc_forwarding--;
855 write_lock_bh(&mrt_lock);
857 write_unlock_bh(&mrt_lock);
859 mroute_clean_tables(sk);
865 * Socket options and virtual interface manipulation. The whole
866 * virtual interface system is a complete heap, but unfortunately
867 * that's how BSD mrouted happens to think. Maybe one day with a proper
868 * MOSPF/PIM router set up we can clean this up.
/*
 * ip_mroute_setsockopt - handle MRT_* options on a raw IGMP socket:
 * MRT_INIT/DONE (claim/release the control-socket role), ADD/DEL_VIF,
 * ADD/DEL_MFC, MRT_ASSERT and (PIMSM) MRT_PIM.
 * NOTE(review): the switch/case labels, rtnl locking and several
 * declarations/returns are missing from this extract.
 */
871 int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
/* Everything except MRT_INIT requires owning the socket or CAP_NET_ADMIN. */
877 if(optname!=MRT_INIT)
879 if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
/* MRT_INIT: only one raw-IGMP socket may become the control socket. */
886 if (sk->sk_type != SOCK_RAW ||
887 inet_sk(sk)->num != IPPROTO_IGMP)
889 if(optlen!=sizeof(int))
/* Hook mrtsock_destruct so daemon death cleans the tables. */
898 ret = ip_ra_control(sk, 1, mrtsock_destruct);
900 write_lock_bh(&mrt_lock);
902 write_unlock_bh(&mrt_lock);
904 ipv4_devconf.mc_forwarding++;
/* MRT_DONE: release the control-socket role. */
909 if (sk!=mroute_socket)
911 return ip_ra_control(sk, 0, NULL);
/* MRT_ADD_VIF / MRT_DEL_VIF. */
914 if(optlen!=sizeof(vif))
916 if (copy_from_user(&vif,optval,sizeof(vif)))
918 if(vif.vifc_vifi >= MAXVIFS)
921 if (optname==MRT_ADD_VIF) {
922 ret = vif_add(&vif, sk==mroute_socket);
924 ret = vif_delete(vif.vifc_vifi);
930 * Manipulate the forwarding caches. These live
931 * in a sort of kernel/user symbiosis.
935 if(optlen!=sizeof(mfc))
937 if (copy_from_user(&mfc,optval, sizeof(mfc)))
940 if (optname==MRT_DEL_MFC)
941 ret = ipmr_mfc_delete(&mfc);
943 ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
947 * Control PIM assert.
952 if(get_user(v,(int __user *)optval))
954 mroute_do_assert=(v)?1:0;
957 #ifdef CONFIG_IP_PIMSM
/* MRT_PIM: toggling PIM mode (un)registers the PIMv2 protocol handler. */
961 if(get_user(v,(int __user *)optval))
966 if (v != mroute_do_pim) {
968 mroute_do_assert = v;
969 #ifdef CONFIG_IP_PIMSM_V2
971 ret = inet_add_protocol(&pim_protocol,
974 ret = inet_del_protocol(&pim_protocol,
985 * Spurious command, or MRT_VERSION which you cannot
994 * Getsock opt support for the multicast routing system.
/*
 * ip_mroute_getsockopt - read MRT_VERSION, MRT_ASSERT and (PIMSM)
 * MRT_PIM as an int, clamped to the caller-supplied buffer length.
 */
997 int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
1002 if(optname!=MRT_VERSION &&
1003 #ifdef CONFIG_IP_PIMSM
1006 optname!=MRT_ASSERT)
1007 return -ENOPROTOOPT;
1009 if (get_user(olr, optlen))
1012 olr = min_t(unsigned int, olr, sizeof(int));
1016 if(put_user(olr,optlen))
1018 if(optname==MRT_VERSION)
1020 #ifdef CONFIG_IP_PIMSM
1021 else if(optname==MRT_PIM)
1025 val=mroute_do_assert;
1026 if(copy_to_user(optval,&val,olr))
1032 * The IP multicast ioctl support routines.
/*
 * ipmr_ioctl - SIOCGETVIFCNT (per-vif packet/byte counters) and
 * SIOCGETSGCNT (per-(S,G) counters) queries, read under mrt_lock.
 * Returns -ENOIOCTLCMD for anything else.
 * NOTE(review): switch/case labels, vifi bounds check and some
 * returns are missing from this extract; the Broadcom branch appears
 * to call the 3-argument ipmr_cache_find (call line missing).
 */
1035 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1037 struct sioc_sg_req sr;
1038 struct sioc_vif_req vr;
1039 struct vif_device *vif;
1040 #if defined(CONFIG_MIPS_BRCM)
1041 struct mfc_cache *c=NULL;
1043 struct mfc_cache *c;
/* SIOCGETVIFCNT */
1049 if (copy_from_user(&vr,arg,sizeof(vr)))
1053 read_lock(&mrt_lock);
1054 vif=&vif_table[vr.vifi];
1055 if(VIF_EXISTS(vr.vifi)) {
1056 vr.icount=vif->pkt_in;
1057 vr.ocount=vif->pkt_out;
1058 vr.ibytes=vif->bytes_in;
1059 vr.obytes=vif->bytes_out;
1060 read_unlock(&mrt_lock);
1062 if (copy_to_user(arg,&vr,sizeof(vr)))
1066 read_unlock(&mrt_lock);
1067 return -EADDRNOTAVAIL;
/* SIOCGETSGCNT */
1069 if (copy_from_user(&sr,arg,sizeof(sr)))
1072 read_lock(&mrt_lock);
1073 #if defined(CONFIG_MIPS_BRCM)
1074 // c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
1076 c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
1079 sr.pktcnt = c->mfc_un.res.pkt;
1080 sr.bytecnt = c->mfc_un.res.bytes;
1081 sr.wrong_if = c->mfc_un.res.wrong_if;
1082 read_unlock(&mrt_lock);
1084 if (copy_to_user(arg,&sr,sizeof(sr)))
1088 read_unlock(&mrt_lock);
1089 return -EADDRNOTAVAIL;
1091 return -ENOIOCTLCMD;
/*
 * ipmr_device_event - netdev notifier: when a device unregisters, scan
 * the vif table and delete any vif bound to it (delete call missing
 * from this extract).
 */
1096 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1098 struct vif_device *v;
1100 if (event != NETDEV_UNREGISTER)
1103 for(ct=0;ct<maxvif;ct++,v++) {
/* Registered with the netdev notifier chain at module init. */
1111 static struct notifier_block ip_mr_notifier={
1112 .notifier_call = ipmr_device_event,
1116 * Encapsulate a packet by attaching a valid IPIP header to it.
1117 * This avoids tunnel drivers and other mess and gives us the speed so
1118 * important for multicast video.
/*
 * Prepends an outer IPv4 header (protocol IPPROTO_IPIP, saddr/daddr as
 * given) in front of the current header.  TOS/TTL are copied from the
 * inner header.  NOTE(review): version/ihl/saddr/daddr assignment lines
 * are missing from this extract.
 */
1121 static void ip_encap(struct sk_buff *skb, u32 saddr, u32 daddr)
1123 struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
1126 iph->tos = skb->nh.iph->tos;
1127 iph->ttl = skb->nh.iph->ttl;
1131 iph->protocol = IPPROTO_IPIP;
1133 iph->tot_len = htons(skb->len);
1134 ip_select_ident(iph, skb->dst, NULL);
/* The previous network header becomes the inner (transport) header. */
1137 skb->h.ipiph = skb->nh.iph;
/* IP options from the inner header must not leak to the outer one. */
1139 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
/*
 * NF_IP_FORWARD okfn: bump forwarding stats, process any IP options,
 * and hand the skb to the output path.
 */
1143 static inline int ipmr_forward_finish(struct sk_buff *skb)
1145 struct ip_options * opt = &(IPCB(skb)->opt);
1147 IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
1149 if (unlikely(opt->optlen))
1150 ip_forward_options(skb);
1152 return dst_output(skb);
1156 * Processing handlers for ipmr_forward
/*
 * ipmr_queue_xmit - transmit one copy of 'skb' out vif 'vifi'.
 * Register vifs report the packet to the daemon instead of sending.
 * Tunnel vifs route to the remote endpoint and IPIP-encapsulate;
 * plain vifs route to the group address.  DF-flagged packets that
 * exceed the path MTU are dropped silently (no ICMP for multicast).
 * Consumes/frees the skb.
 * NOTE(review): encap/rt declarations, pkt_out accounting, goto labels
 * and free paths are missing from this extract.
 */
1159 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1161 struct iphdr *iph = skb->nh.iph;
1162 struct vif_device *vif = &vif_table[vifi];
1163 struct net_device *dev;
1167 if (vif->dev == NULL)
1170 #ifdef CONFIG_IP_PIMSM
/* Register vif: count it and punt the whole packet to the daemon. */
1171 if (vif->flags & VIFF_REGISTER) {
1173 vif->bytes_out+=skb->len;
1174 ((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len;
1175 ((struct net_device_stats*)vif->dev->priv)->tx_packets++;
1176 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
/* Tunnel vif: route towards the tunnel remote endpoint. */
1182 if (vif->flags&VIFF_TUNNEL) {
1183 struct flowi fl = { .oif = vif->link,
1185 { .daddr = vif->remote,
1186 .saddr = vif->local,
1187 .tos = RT_TOS(iph->tos) } },
1188 .proto = IPPROTO_IPIP };
1189 if (ip_route_output_key(&rt, &fl))
/* Reserve room for the IPIP header added below. */
1191 encap = sizeof(struct iphdr);
/* Plain vif: route directly to the multicast destination. */
1193 struct flowi fl = { .oif = vif->link,
1195 { .daddr = iph->daddr,
1196 .tos = RT_TOS(iph->tos) } },
1197 .proto = IPPROTO_IPIP };
1198 if (ip_route_output_key(&rt, &fl))
1202 dev = rt->u.dst.dev;
1204 if (skb->len+encap > dst_pmtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
1205 /* Do not fragment multicasts. Alas, IPv4 does not
1206 allow to send ICMP, so that packets will disappear
1210 IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
/* Make sure there is headroom for link-layer + encap headers. */
1215 encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
1217 if (skb_cow(skb, encap)) {
1223 vif->bytes_out+=skb->len;
/* Swap in the freshly-resolved output route. */
1225 dst_release(skb->dst);
1226 skb->dst = &rt->u.dst;
1228 ip_decrease_ttl(iph);
1230 /* FIXME: forward and output firewalls used to be called here.
1231 * What do we do with netfilter? -- RR */
1232 if (vif->flags & VIFF_TUNNEL) {
1233 ip_encap(skb, vif->local, vif->remote);
1234 /* FIXME: extra output firewall step used to be here. --RR */
1235 ((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++;
1236 ((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb->len;
/* Mark so ip_mr_input() will not forward this copy a second time. */
1239 IPCB(skb)->flags |= IPSKB_FORWARDED;
1242 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1243 * not only before forwarding, but after forwarding on all output
1244 * interfaces. It is clear, if mrouter runs a multicasting
1245 * program, it should receive packets not depending to what interface
1246 * program is joined.
1247 * If we will not make it, the program will have to join on all
1248 * interfaces. On the other hand, multihoming host (or router, but
1249 * not mrouter) cannot join to more than one interface - it will
1250 * result in receiving multiple packets.
1252 NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
1253 ipmr_forward_finish);
/*
 * ipmr_find_vif - reverse-map a net_device to its vif index, or a
 * negative value when the device backs no vif (return missing here).
 */
1261 static int ipmr_find_vif(struct net_device *dev)
1264 for (ct=maxvif-1; ct>=0; ct--) {
1265 if (vif_table[ct].dev == dev)
1271 /* "local" means that we should preserve one skb (for local delivery) */
/*
 * ip_mr_forward - replicate 'skb' to every output vif of 'cache' whose
 * TTL threshold the packet passes.  Packets arriving on the wrong
 * input interface are dropped (optionally raising an IGMPMSG_WRONGVIF
 * assert toward the daemon, rate-limited by MFC_ASSERT_THRESH).
 * Called under mrt_lock (read side) from ip_mr_input / cache resolve.
 * NOTE(review): vif/psend/ct declarations, goto labels, the dont_forward
 * path and the final return are missing from this extract.
 */
1273 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
1278 vif = cache->mfc_parent;
1279 cache->mfc_un.res.pkt++;
1280 cache->mfc_un.res.bytes += skb->len;
1283 * Wrong interface: drop packet and (maybe) send PIM assert.
1285 if (vif_table[vif].dev != skb->dev) {
/* iif == 0 means locally generated, not received on any interface. */
1288 if (((struct rtable*)skb->dst)->fl.iif == 0) {
1289 /* It is our own packet, looped back.
1290 Very complicated situation...
1292 The best workaround until routing daemons will be
1293 fixed is not to redistribute packet, if it was
1294 send through wrong interface. It means, that
1295 multicast applications WILL NOT work for
1296 (S,G), which have default multicast route pointing
1297 to wrong oif. In any case, it is not a good
1298 idea to use multicasting applications on router.
1303 cache->mfc_un.res.wrong_if++;
1304 true_vifi = ipmr_find_vif(skb->dev);
1306 if (true_vifi >= 0 && mroute_do_assert &&
1307 /* pimsm uses asserts, when switching from RPT to SPT,
1308 so that we cannot check that packet arrived on an oif.
1309 It is bad, but otherwise we would need to move pretty
1310 large chunk of pimd to kernel. Ough... --ANK
1312 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
1314 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1315 cache->mfc_un.res.last_assert = jiffies;
1316 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
/* Correct input vif: account the arrival. */
1321 vif_table[vif].pkt_in++;
1322 vif_table[vif].bytes_in+=skb->len;
/* Clone for every eligible oif except the last; see psend handling below. */
1327 for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
1328 if (skb->nh.iph->ttl > cache->mfc_un.res.ttls[ct]) {
1330 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1332 ipmr_queue_xmit(skb2, cache, psend);
/* Final copy: clone if 'local' must keep the original for delivery... */
1339 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1341 ipmr_queue_xmit(skb2, cache, psend);
/* ...otherwise hand over the original skb itself. */
1343 ipmr_queue_xmit(skb, cache, psend);
1356 * Multicast packets for forwarding arrive here
/*
 * ip_mr_input - entry point for received multicast packets.  Delivers
 * router-alert / IGMP traffic to the control socket, looks up the (S,G)
 * in the forwarding cache (Broadcom variant also keys on the ingress
 * ifindex), queues cache misses via ipmr_cache_unresolved(), and
 * otherwise forwards via ip_mr_forward() -- with local delivery of a
 * clone when the route is also RTCF_LOCAL.
 * NOTE(review): several returns, kfree_skb calls and the IGMP "no
 * daemon" fallthrough are missing from this extract.
 */
1359 int ip_mr_input(struct sk_buff *skb)
1361 struct mfc_cache *cache;
1362 int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;
1363 #if defined(CONFIG_MIPS_BRCM)
1364 struct net_device *dev=skb->dev;
1367 /* Packet is looped back after forward, it should not be
1368 forwarded second time, but still can be delivered locally.
1370 if (IPCB(skb)->flags&IPSKB_FORWARDED)
1374 if (IPCB(skb)->opt.router_alert) {
1375 if (ip_call_ra_chain(skb))
1377 } else if (skb->nh.iph->protocol == IPPROTO_IGMP){
1378 /* IGMPv1 (and broken IGMPv2 implementations sort of
1379 Cisco IOS <= 11.2(8)) do not put router alert
1380 option to IGMP packets destined to routable
1381 groups. It is very bad, because it means
1382 that we can forward NO IGMP messages.
1384 read_lock(&mrt_lock);
1385 if (mroute_socket) {
/* Hand raw IGMP straight to the routing daemon. */
1386 raw_rcv(mroute_socket, skb);
1387 read_unlock(&mrt_lock);
1390 read_unlock(&mrt_lock);
1394 read_lock(&mrt_lock);
1395 #if defined(CONFIG_MIPS_BRCM)
1396 cache = ipmr_cache_find(skb->nh.iph->saddr, skb->nh.iph->daddr,dev->ifindex);
1398 cache = ipmr_cache_find(skb->nh.iph->saddr, skb->nh.iph->daddr);
1403 * No usable cache entry
/* Miss + local: deliver the original, queue a clone for resolution. */
1409 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1410 ip_local_deliver(skb);
1412 read_unlock(&mrt_lock);
1418 vif = ipmr_find_vif(skb->dev);
1420 int err = ipmr_cache_unresolved(vif, skb);
1421 read_unlock(&mrt_lock);
1425 read_unlock(&mrt_lock);
/* Cache hit: replicate out all eligible vifs. */
1430 ip_mr_forward(skb, cache, local);
1432 read_unlock(&mrt_lock);
1435 return ip_local_deliver(skb);
/* dont_forward path (label missing): local delivery only. */
1441 return ip_local_deliver(skb);
1446 #ifdef CONFIG_IP_PIMSM_V1
1448 * Handle IGMP messages of PIMv1
/*
 * pim_rcv_v1 - receive a PIMv1 REGISTER (carried inside IGMP): validate
 * it, strip the outer headers, and re-inject the encapsulated multicast
 * packet as if it arrived on the pimreg device (netif_rx line missing).
 * Drop paths (kfree_skb/returns) are also missing from this extract.
 */
1451 int pim_rcv_v1(struct sk_buff * skb)
1453 struct igmphdr *pim;
1454 struct iphdr *encap;
1455 struct net_device *reg_dev = NULL;
1457 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1460 pim = (struct igmphdr*)skb->h.raw;
/* Only accept when PIM mode is on and this is a v1 REGISTER. */
1462 if (!mroute_do_pim ||
1463 skb->len < sizeof(*pim) + sizeof(*encap) ||
1464 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1467 encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
1470 a. packet is really destined to a multicast group
1471 b. packet is not a NULL-REGISTER
1472 c. packet is not truncated
1474 if (!MULTICAST(encap->daddr) ||
1475 encap->tot_len == 0 ||
1476 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
/* Snapshot the register device under mrt_lock. */
1479 read_lock(&mrt_lock);
1480 if (reg_vif_num >= 0)
1481 reg_dev = vif_table[reg_vif_num].dev;
1484 read_unlock(&mrt_lock);
1486 if (reg_dev == NULL)
/* Decapsulate: inner IP header becomes the packet's network header. */
1489 skb->mac.raw = skb->nh.raw;
1490 skb_pull(skb, (u8*)encap - skb->data);
1491 skb->nh.iph = (struct iphdr *)skb->data;
1493 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1494 skb->protocol = htons(ETH_P_IP);
1496 skb->pkt_type = PACKET_HOST;
1497 dst_release(skb->dst);
1499 ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len;
1500 ((struct net_device_stats*)reg_dev->priv)->rx_packets++;
1511 #ifdef CONFIG_IP_PIMSM_V2
/*
 * pim_rcv - receive a PIMv2 REGISTER: verify type/flags and checksum
 * (header-only checksum is accepted for interop with older peers --
 * see the changelog note at the top of the file), then decapsulate and
 * re-inject via the pimreg device, mirroring pim_rcv_v1 above.
 * NOTE(review): drop paths and the netif_rx line are missing here.
 */
1512 static int pim_rcv(struct sk_buff * skb)
1514 struct pimreghdr *pim;
1515 struct iphdr *encap;
1516 struct net_device *reg_dev = NULL;
1518 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1521 pim = (struct pimreghdr*)skb->h.raw;
1522 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
1523 (pim->flags&PIM_NULL_REGISTER) ||
/* Accept either a valid header-only checksum or a full-packet one. */
1524 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
1525 (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1528 /* check if the inner packet is destined to mcast group */
1529 encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
1530 if (!MULTICAST(encap->daddr) ||
1531 encap->tot_len == 0 ||
1532 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1535 read_lock(&mrt_lock);
1536 if (reg_vif_num >= 0)
1537 reg_dev = vif_table[reg_vif_num].dev;
1540 read_unlock(&mrt_lock);
1542 if (reg_dev == NULL)
/* Decapsulate: inner IP header becomes the packet's network header. */
1545 skb->mac.raw = skb->nh.raw;
1546 skb_pull(skb, (u8*)encap - skb->data);
1547 skb->nh.iph = (struct iphdr *)skb->data;
1549 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
1550 skb->protocol = htons(ETH_P_IP);
1552 skb->pkt_type = PACKET_HOST;
1553 dst_release(skb->dst);
1554 ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len;
1555 ((struct net_device_stats*)reg_dev->priv)->rx_packets++;
/*
 * ipmr_fill_mroute - append RTA_IIF and an RTA_MULTIPATH nexthop list
 * (one rtnexthop per enabled oif, hop count = TTL threshold) to a
 * partially-built rtnetlink message.  Caller holds mrt_lock.
 * NOTE(review): return type, 'b' capture, returns and the
 * rtattr_failure label are missing from this extract.
 */
1568 ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1571 struct rtnexthop *nhp;
1572 struct net_device *dev = vif_table[c->mfc_parent].dev;
1574 struct rtattr *mp_head;
1577 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1579 mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));
1581 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1582 if (c->mfc_un.res.ttls[ct] < 255) {
1583 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1584 goto rtattr_failure;
1585 nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1586 nhp->rtnh_flags = 0;
/* Abuse rtnh_hops to export the vif's TTL threshold. */
1587 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1588 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
1589 nhp->rtnh_len = sizeof(*nhp);
/* Patch the multipath attribute header now that its length is known. */
1592 mp_head->rta_type = RTA_MULTIPATH;
1593 mp_head->rta_len = skb->tail - (u8*)mp_head;
1594 rtm->rtm_type = RTN_MULTICAST;
/* rtattr_failure: roll the skb back to its pre-call length. */
1598 skb_trim(skb, b - skb->data);
/*
 * ipmr_get_route - rtnetlink route-get for a multicast (S,G).  On a
 * cache miss (and !nowait) a fake IP header with version==0 is pushed
 * so ipmr_cache_unresolved()/ipmr_cache_resolve() can answer the
 * netlink request once the daemon installs the route.
 * NOTE(review): vif/err declarations and -ENODEV/-EAGAIN returns are
 * missing from this extract.
 */
1602 int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1605 struct mfc_cache *cache;
1606 struct rtable *rt = (struct rtable*)skb->dst;
1607 #if defined(CONFIG_MIPS_BRCM)
1608 struct net_device *dev=skb->dev;
1611 read_lock(&mrt_lock);
1612 #if defined(CONFIG_MIPS_BRCM)
1613 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst,dev->ifindex);
1615 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
/* Cache miss path. */
1619 struct net_device *dev;
1623 read_unlock(&mrt_lock);
1628 if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
1629 read_unlock(&mrt_lock);
/* Build the synthetic version==0 placeholder and park the request. */
1632 skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
1633 skb->nh.iph->ihl = sizeof(struct iphdr)>>2;
1634 skb->nh.iph->saddr = rt->rt_src;
1635 skb->nh.iph->daddr = rt->rt_dst;
1636 skb->nh.iph->version = 0;
1637 err = ipmr_cache_unresolved(vif, skb);
1638 read_unlock(&mrt_lock);
/* Cache hit: fill the reply directly. */
1642 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1643 cache->mfc_flags |= MFC_NOTIFY;
1644 err = ipmr_fill_mroute(skb, cache, rtm);
1645 read_unlock(&mrt_lock);
1649 #ifdef CONFIG_PROC_FS
1651 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
1653 struct ipmr_vif_iter {
/*
 * Advance the iterator to the pos-th existing vif, skipping holes in
 * vif_table (VIF_EXISTS is false for unused slots).  Returns a pointer
 * to that vif, or NULL past the end.  Caller holds mrt_lock.
 * NOTE(review): the pos-decrement/continue lines and the trailing
 * "return NULL" are missing from this excerpt.
 */
1657 static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1660 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
1661 if(!VIF_EXISTS(iter->ct))
1664 return &vif_table[iter->ct];
/*
 * seq_file .start: take mrt_lock (released in ipmr_vif_seq_stop) and
 * position at *pos - 1; pos 0 yields the header token (the
 * ": SEQ_START_TOKEN" arm of the ternary is outside this excerpt).
 */
1669 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
1671 read_lock(&mrt_lock);
1672 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
/*
 * seq_file .next: after the header token, restart at index 0; otherwise
 * scan forward to the next existing vif.  (The ++*pos, the loop's
 * continue and the final "return NULL" are outside this excerpt.)
 */
1676 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1678 struct ipmr_vif_iter *iter = seq->private;
1681 if (v == SEQ_START_TOKEN)
1682 return ipmr_vif_seq_idx(iter, 0);
1684 while (++iter->ct < maxvif) {
/* Skip holes left by removed vifs. */
1685 if(!VIF_EXISTS(iter->ct))
1687 return &vif_table[iter->ct];
/* seq_file .stop: drop the lock taken in ipmr_vif_seq_start. */
1692 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
1694 read_unlock(&mrt_lock);
/*
 * seq_file .show: print the column header for the start token, else one
 * line of per-vif statistics.  "%2Zd" is the old kernel spelling of the
 * size_t format for the vif index (argument line is outside this
 * excerpt).  "none" is printed for a vif whose device has gone away.
 */
1697 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1699 if (v == SEQ_START_TOKEN) {
1701 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1703 const struct vif_device *vif = v;
1704 const char *name = vif->dev ? vif->dev->name : "none";
1707 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1709 name, vif->bytes_in, vif->pkt_in,
1710 vif->bytes_out, vif->pkt_out,
1711 vif->flags, vif->local, vif->remote);
/* seq_file operations for /proc/net/ip_mr_vif. */
1716 static struct seq_operations ipmr_vif_seq_ops = {
1717 .start = ipmr_vif_seq_start,
1718 .next = ipmr_vif_seq_next,
1719 .stop = ipmr_vif_seq_stop,
1720 .show = ipmr_vif_seq_show,
/*
 * open() for /proc/net/ip_mr_vif: allocate per-open iterator state and
 * attach it to the seq_file.  (The NULL check on kmalloc, the error
 * unwind, and the "seq->private = s" assignment are outside this
 * excerpt; the private state is freed by seq_release_private.)
 */
1723 static int ipmr_vif_open(struct inode *inode, struct file *file)
1725 struct seq_file *seq;
1727 struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
1732 rc = seq_open(file, &ipmr_vif_seq_ops);
1737 seq = file->private_data;
/* file_operations for /proc/net/ip_mr_vif (the .read = seq_read line
 * is outside this excerpt).  seq_release_private frees the iterator
 * allocated in ipmr_vif_open. */
1747 static struct file_operations ipmr_vif_fops = {
1748 .owner = THIS_MODULE,
1749 .open = ipmr_vif_open,
1751 .llseek = seq_lseek,
1752 .release = seq_release_private,
/* seq_file iterator state for /proc/net/ip_mr_cache.  'cache' points at
 * mfc_cache_array while walking resolved entries, or at &mfc_unres_queue
 * while walking unresolved ones -- the _stop/_show callbacks use this to
 * pick the right lock and output format. */
1755 struct ipmr_mfc_iter {
1756 struct mfc_cache **cache;
/*
 * Position the iterator at the pos-th MFC entry: first the resolved
 * hash chains under mrt_lock, then the unresolved queue under
 * mfc_unres_lock.  Whichever lock covers the returned entry is left
 * held for the subsequent _show/_next calls and is dropped in
 * ipmr_mfc_seq_stop.  (The "if (pos-- == 0) return mfc;" lines and the
 * final "return NULL" are outside this excerpt.)
 */
1761 static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1763 struct mfc_cache *mfc;
1765 it->cache = mfc_cache_array;
1766 read_lock(&mrt_lock);
1767 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
1768 for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
1771 read_unlock(&mrt_lock);
/* Fall through to the unresolved queue under its own spinlock. */
1773 it->cache = &mfc_unres_queue;
1774 spin_lock_bh(&mfc_unres_lock);
1775 for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
1778 spin_unlock_bh(&mfc_unres_lock);
/*
 * seq_file .start: position at *pos - 1; pos 0 yields the header token
 * (the ": SEQ_START_TOKEN" arm and the it->cache reset are outside this
 * excerpt).  Locking is done inside ipmr_mfc_seq_idx.
 */
1785 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1787 struct ipmr_mfc_iter *it = seq->private;
1790 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
/*
 * seq_file .next: follow the current entry's ->next pointer; when a
 * hash chain ends, advance to the next non-empty MFC_LINES bucket; when
 * the whole resolved array is exhausted, switch locks and continue into
 * the unresolved queue.  (The ++*pos, "if (mfc->next) return mfc->next;",
 * intermediate returns and "return NULL" lines are outside this excerpt.)
 */
1794 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1796 struct mfc_cache *mfc = v;
1797 struct ipmr_mfc_iter *it = seq->private;
1801 if (v == SEQ_START_TOKEN)
1802 return ipmr_mfc_seq_idx(seq->private, 0);
/* Already in the unresolved queue: nothing follows it, we are done. */
1807 if (it->cache == &mfc_unres_queue)
1810 BUG_ON(it->cache != mfc_cache_array);
1812 while (++it->ct < MFC_LINES) {
1813 mfc = mfc_cache_array[it->ct];
1818 /* exhausted cache_array, show unresolved */
/* Lock handoff: drop mrt_lock, take mfc_unres_lock for the tail walk. */
1819 read_unlock(&mrt_lock);
1820 it->cache = &mfc_unres_queue;
1823 spin_lock_bh(&mfc_unres_lock);
1824 mfc = mfc_unres_queue;
1829 spin_unlock_bh(&mfc_unres_lock);
/*
 * seq_file .stop: release whichever lock the iterator still holds,
 * identified by where it->cache points (see struct ipmr_mfc_iter).
 * Neither branch fires if iteration already completed past both lists.
 */
1835 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1837 struct ipmr_mfc_iter *it = seq->private;
1839 if (it->cache == &mfc_unres_queue)
1840 spin_unlock_bh(&mfc_unres_lock);
1841 else if (it->cache == mfc_cache_array)
1842 read_unlock(&mrt_lock);
/*
 * seq_file .show: header line for the start token, else one line per
 * MFC entry: group, origin, input vif, packet/byte/wrong-interface
 * counters, then for resolved entries an "oif:ttl" pair per forwarding
 * vif.  Unresolved entries (it->cache == &mfc_unres_queue) get no oif
 * list -- their mfc_un union holds upcall state, not counters/ttls.
 * (The iif argument of seq_printf and the VIF_EXISTS half of the ttl
 * condition are on lines outside this excerpt.)
 */
1845 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1849 if (v == SEQ_START_TOKEN) {
1851 "Group Origin Iif Pkts Bytes Wrong Oifs\n")
1853 const struct mfc_cache *mfc = v;
1854 const struct ipmr_mfc_iter *it = seq->private;
1856 seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
1857 (unsigned long) mfc->mfc_mcastgrp,
1858 (unsigned long) mfc->mfc_origin,
1860 mfc->mfc_un.res.pkt,
1861 mfc->mfc_un.res.bytes,
1862 mfc->mfc_un.res.wrong_if);
1864 if (it->cache != &mfc_unres_queue) {
1865 for(n = mfc->mfc_un.res.minvif;
1866 n < mfc->mfc_un.res.maxvif; n++ ) {
/* ttl 255 means "do not forward on this vif" -- omit it. */
1868 && mfc->mfc_un.res.ttls[n] < 255)
1871 n, mfc->mfc_un.res.ttls[n]);
1874 seq_putc(seq, '\n');
/* seq_file operations for /proc/net/ip_mr_cache. */
1879 static struct seq_operations ipmr_mfc_seq_ops = {
1880 .start = ipmr_mfc_seq_start,
1881 .next = ipmr_mfc_seq_next,
1882 .stop = ipmr_mfc_seq_stop,
1883 .show = ipmr_mfc_seq_show,
/*
 * open() for /proc/net/ip_mr_cache: mirror of ipmr_vif_open -- allocate
 * iterator state and attach it to the seq_file.  (kmalloc NULL check,
 * error unwind and "seq->private = s" are outside this excerpt; the
 * state is freed by seq_release_private.)
 */
1886 static int ipmr_mfc_open(struct inode *inode, struct file *file)
1888 struct seq_file *seq;
1890 struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
1895 rc = seq_open(file, &ipmr_mfc_seq_ops);
1899 seq = file->private_data;
/* file_operations for /proc/net/ip_mr_cache (the .read = seq_read line
 * is outside this excerpt). */
1910 static struct file_operations ipmr_mfc_fops = {
1911 .owner = THIS_MODULE,
1912 .open = ipmr_mfc_open,
1913 .llseek = seq_lseek,
1914 .release = seq_release_private,
1918 #ifdef CONFIG_IP_PIMSM_V2
/* inet protocol hook for PIMv2 register decapsulation; the .handler
 * member (presumably pim_rcv) is on a line outside this excerpt --
 * TODO confirm against the full file. */
1919 static struct net_protocol pim_protocol = {
1926 * Setup for IP multicast routing
1929 void __init ip_mr_init(void)
1931 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1932 sizeof(struct mfc_cache),
1933 0, SLAB_HWCACHE_ALIGN,
1936 panic("cannot allocate ip_mrt_cache");
1938 init_timer(&ipmr_expire_timer);
1939 ipmr_expire_timer.function=ipmr_expire_process;
1940 register_netdevice_notifier(&ip_mr_notifier);
1941 #ifdef CONFIG_PROC_FS
1942 proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
1943 proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);