2 * This is a module which is used for queueing packets and communicating with
3 * userspace via nfnetlink.
5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
7 * Based on the old ipv4-only ip_queue.c:
8 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
9 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
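/*
 * Illustrative only, not part of this module: a minimal sketch of the
 * userspace side, assuming the separate libnetfilter_queue library and a
 * rule such as "iptables -A INPUT -j NFQUEUE --queue-num 0".  Error
 * handling is omitted for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/netfilter.h>
 *	#include <libnetfilter_queue/libnetfilter_queue.h>
 *
 *	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *		      struct nfq_data *nfa, void *data)
 *	{
 *		struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
 *		u_int32_t id = ph ? ntohl(ph->packet_id) : 0;
 *
 *		return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *	}
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct nfq_handle *h = nfq_open();
 *		struct nfq_q_handle *qh;
 *		int fd, rv;
 *
 *		nfq_bind_pf(h, AF_INET);
 *		qh = nfq_create_queue(h, 0, &cb, NULL);
 *		nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *
 *		fd = nfq_fd(h);
 *		while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
 *			nfq_handle_packet(h, buf, rv);
 *
 *		nfq_destroy_queue(qh);
 *		nfq_close(h);
 *		return 0;
 *	}
 */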
16 #include <linux/module.h>
17 #include <linux/skbuff.h>
18 #include <linux/init.h>
19 #include <linux/spinlock.h>
20 #include <linux/notifier.h>
21 #include <linux/netdevice.h>
22 #include <linux/netfilter.h>
23 #include <linux/proc_fs.h>
24 #include <linux/netfilter_ipv4.h>
25 #include <linux/netfilter_ipv6.h>
26 #include <linux/netfilter/nfnetlink.h>
27 #include <linux/netfilter/nfnetlink_queue.h>
28 #include <linux/list.h>
30 #include <net/netfilter/nf_queue.h>
32 #include <asm/atomic.h>
34 #ifdef CONFIG_BRIDGE_NETFILTER
35 #include "../bridge/br_private.h"
38 #define NFQNL_QMAX_DEFAULT 1024
40 struct nfqnl_instance {
41 struct hlist_node hlist; /* global list of queues */
45 unsigned int queue_maxlen;
46 unsigned int copy_range;
47 unsigned int queue_total;
48 unsigned int queue_dropped;
49 unsigned int queue_user_dropped;
51 unsigned int id_sequence; /* 'sequence' of pkt ids */
53 u_int16_t queue_num; /* number of this queue */
58 struct list_head queue_list; /* packets in queue */
61 typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
63 static DEFINE_SPINLOCK(instances_lock);
65 #define INSTANCE_BUCKETS 16
66 static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;
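/*
 * Queue instances live in a small hash table keyed by queue number.
 * Lookups are RCU-protected; additions and removals are serialized by
 * instances_lock.
 */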
68 static inline u_int8_t instance_hashfn(u_int16_t queue_num)
70 return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
73 static struct nfqnl_instance *
74 instance_lookup(u_int16_t queue_num)
76 struct hlist_head *head;
77 struct hlist_node *pos;
78 struct nfqnl_instance *inst;
80 head = &instance_table[instance_hashfn(queue_num)];
81 hlist_for_each_entry_rcu(inst, pos, head, hlist) {
82 if (inst->queue_num == queue_num)
88 static struct nfqnl_instance *
89 instance_create(u_int16_t queue_num, int pid)
91 struct nfqnl_instance *inst = NULL;
94 spin_lock(&instances_lock);
95 if (instance_lookup(queue_num))
98 inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
102 inst->queue_num = queue_num;
103 inst->peer_pid = pid;
104 inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
105 inst->copy_range = 0xfffff;
106 inst->copy_mode = NFQNL_COPY_NONE;
107 spin_lock_init(&inst->lock);
108 INIT_LIST_HEAD(&inst->queue_list);
109 INIT_RCU_HEAD(&inst->rcu);
111 if (!try_module_get(THIS_MODULE))
114 h = instance_hashfn(queue_num);
115 hlist_add_head_rcu(&inst->hlist, &instance_table[h]);
117 spin_unlock(&instances_lock);
124 spin_unlock(&instances_lock);
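/*
 * Instance teardown is a two step process: the instance is unhashed under
 * instances_lock, then instance_destroy_rcu() runs after a grace period to
 * drop any still-queued packets (nfqnl_flush() with NF_DROP) and release
 * the module reference taken in instance_create().
 */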
128 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
132 instance_destroy_rcu(struct rcu_head *head)
134 struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
137 nfqnl_flush(inst, NULL, 0);
139 module_put(THIS_MODULE);
143 __instance_destroy(struct nfqnl_instance *inst)
145 hlist_del_rcu(&inst->hlist);
146 call_rcu(&inst->rcu, instance_destroy_rcu);
150 instance_destroy(struct nfqnl_instance *inst)
152 spin_lock(&instances_lock);
153 __instance_destroy(inst);
154 spin_unlock(&instances_lock);
158 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
160 list_add_tail(&entry->list, &queue->queue_list);
161 queue->queue_total++;
164 static struct nf_queue_entry *
165 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
167 struct nf_queue_entry *entry = NULL, *i;
169 spin_lock_bh(&queue->lock);
171 list_for_each_entry(i, &queue->queue_list, list) {
179 list_del(&entry->list);
180 queue->queue_total--;
183 spin_unlock_bh(&queue->lock);
189 nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
191 struct nf_queue_entry *entry, *next;
193 spin_lock_bh(&queue->lock);
194 list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
195 if (!cmpfn || cmpfn(entry, data)) {
196 list_del(&entry->list);
197 queue->queue_total--;
198 nf_reinject(entry, NF_DROP);
201 spin_unlock_bh(&queue->lock);
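/*
 * Build the NFQNL_MSG_PACKET message for one queued packet: an nfgenmsg
 * header followed by NFQA_PACKET_HDR, the in/out ifindex attributes (plus
 * the physical devices for bridged traffic), NFQA_MARK, NFQA_HWADDR,
 * NFQA_TIMESTAMP and, in NFQNL_COPY_PACKET mode, up to copy_range bytes
 * of payload in NFQA_PAYLOAD.
 */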
204 static struct sk_buff *
205 nfqnl_build_packet_message(struct nfqnl_instance *queue,
206 struct nf_queue_entry *entry, int *errp)
208 sk_buff_data_t old_tail;
212 struct nfqnl_msg_packet_hdr pmsg;
213 struct nlmsghdr *nlh;
214 struct nfgenmsg *nfmsg;
215 struct sk_buff *entskb = entry->skb;
216 struct net_device *indev;
217 struct net_device *outdev;
219 size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
220 + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
221 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
222 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
223 #ifdef CONFIG_BRIDGE_NETFILTER
224 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
225 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
227 + nla_total_size(sizeof(u_int32_t)) /* mark */
228 + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
229 + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
231 outdev = entry->outdev;
233 spin_lock_bh(&queue->lock);
235 switch (queue->copy_mode) {
236 case NFQNL_COPY_META:
237 case NFQNL_COPY_NONE:
241 case NFQNL_COPY_PACKET:
242 if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
243 entskb->ip_summed == CHECKSUM_COMPLETE) &&
244 (*errp = skb_checksum_help(entskb))) {
245 spin_unlock_bh(&queue->lock);
248 if (queue->copy_range == 0
249 || queue->copy_range > entskb->len)
250 data_len = entskb->len;
252 data_len = queue->copy_range;
254 size += nla_total_size(data_len);
259 spin_unlock_bh(&queue->lock);
263 entry->id = queue->id_sequence++;
265 spin_unlock_bh(&queue->lock);
267 skb = alloc_skb(size, GFP_ATOMIC);
271 old_tail = skb->tail;
272 nlh = NLMSG_PUT(skb, 0, 0,
273 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
274 sizeof(struct nfgenmsg));
275 nfmsg = NLMSG_DATA(nlh);
276 nfmsg->nfgen_family = entry->pf;
277 nfmsg->version = NFNETLINK_V0;
278 nfmsg->res_id = htons(queue->queue_num);
280 pmsg.packet_id = htonl(entry->id);
281 pmsg.hw_protocol = entskb->protocol;
282 pmsg.hook = entry->hook;
284 NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
286 indev = entry->indev;
288 #ifndef CONFIG_BRIDGE_NETFILTER
289 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
291 if (entry->pf == PF_BRIDGE) {
292 /* Case 1: indev is physical input device, we need to
293 * look for bridge group (when called from
294 * netfilter_bridge) */
295 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
296 htonl(indev->ifindex));
297 /* this is the bridge group "brX" */
298 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
299 htonl(indev->br_port->br->dev->ifindex));
301 /* Case 2: indev is bridge group, we need to look for
302 * physical device (when called from ipv4) */
303 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
304 htonl(indev->ifindex));
305 if (entskb->nf_bridge && entskb->nf_bridge->physindev)
306 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
307 htonl(entskb->nf_bridge->physindev->ifindex));
313 #ifndef CONFIG_BRIDGE_NETFILTER
314 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
316 if (entry->pf == PF_BRIDGE) {
317 /* Case 1: outdev is physical output device, we need to
318 * look for bridge group (when called from
319 * netfilter_bridge) */
320 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
321 htonl(outdev->ifindex));
322 /* this is the bridge group "brX" */
323 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
324 htonl(outdev->br_port->br->dev->ifindex));
326 /* Case 2: outdev is bridge group, we need to look for
327 * physical output device (when called from ipv4) */
328 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
329 htonl(outdev->ifindex));
330 if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
331 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
332 htonl(entskb->nf_bridge->physoutdev->ifindex));
338 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
340 if (indev && entskb->dev) {
341 struct nfqnl_msg_packet_hw phw;
342 int len = dev_parse_header(entskb, phw.hw_addr);
344 phw.hw_addrlen = htons(len);
345 NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
349 if (entskb->tstamp.tv64) {
350 struct nfqnl_msg_packet_timestamp ts;
351 struct timeval tv = ktime_to_timeval(entskb->tstamp);
352 ts.sec = cpu_to_be64(tv.tv_sec);
353 ts.usec = cpu_to_be64(tv.tv_usec);
355 NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
360 int size = nla_attr_size(data_len);
362 if (skb_tailroom(skb) < nla_total_size(data_len)) {
363 printk(KERN_WARNING "nf_queue: no tailroom!\n");
367 nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
368 nla->nla_type = NFQA_PAYLOAD;
371 if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
375 nlh->nlmsg_len = skb->tail - old_tail;
384 printk(KERN_ERR "nf_queue: error creating packet message\n");
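/*
 * Queue handler entry point (registered via nfqh below): build the netlink
 * message, send it to the bound peer with nfnetlink_unicast() and keep the
 * entry on queue_list until userspace returns a verdict.  The packet is
 * dropped (and accounted) if no peer is bound, the queue is full or the
 * unicast fails.
 */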
389 nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
391 int status = -EINVAL;
392 struct sk_buff *nskb;
393 struct nfqnl_instance *queue;
395 /* rcu_read_lock()ed by nf_hook_slow() */
396 queue = instance_lookup(queuenum);
400 if (queue->copy_mode == NFQNL_COPY_NONE)
403 nskb = nfqnl_build_packet_message(queue, entry, &status);
407 spin_lock_bh(&queue->lock);
409 if (!queue->peer_pid)
410 goto err_out_free_nskb;
412 if (queue->queue_total >= queue->queue_maxlen) {
413 queue->queue_dropped++;
416 printk(KERN_WARNING "nf_queue: full at %d entries, "
417 "dropping packet(s). Dropped: %d\n",
418 queue->queue_total, queue->queue_dropped);
419 goto err_out_free_nskb;
422 /* nfnetlink_unicast will either free the nskb or add it to a socket */
423 status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
425 queue->queue_user_dropped++;
429 __enqueue_entry(queue, entry);
431 spin_unlock_bh(&queue->lock);
438 spin_unlock_bh(&queue->lock);
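/*
 * Replace a queued packet's payload with the data userspace supplied in
 * NFQA_PAYLOAD: shrink or grow the skb as needed, copy the new data in and
 * invalidate the checksum.
 */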
443 nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
448 diff = data_len - e->skb->len;
450 if (pskb_trim(e->skb, data_len))
452 } else if (diff > 0) {
453 if (data_len > 0xFFFF)
455 if (diff > skb_tailroom(e->skb)) {
456 err = pskb_expand_head(e->skb, 0,
457 diff - skb_tailroom(e->skb),
460 printk(KERN_WARNING "nf_queue: OOM "
461 "in mangle, dropping packet\n");
465 skb_put(e->skb, diff);
467 if (!skb_make_writable(e->skb, data_len))
469 skb_copy_to_linear_data(e->skb, data, data_len);
470 e->skb->ip_summed = CHECKSUM_NONE;
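/*
 * Set the copy mode for a queue.  For NFQNL_COPY_PACKET the copy range is
 * capped at 0xffff since the payload is carried in a struct nlattr, whose
 * nla_len is only 16 bits wide.
 */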
475 nfqnl_set_mode(struct nfqnl_instance *queue,
476 unsigned char mode, unsigned int range)
480 spin_lock_bh(&queue->lock);
482 case NFQNL_COPY_NONE:
483 case NFQNL_COPY_META:
484 queue->copy_mode = mode;
485 queue->copy_range = 0;
488 case NFQNL_COPY_PACKET:
489 queue->copy_mode = mode;
490 /* we're using struct nlattr which has 16bit nla_len */
492 queue->copy_range = 0xffff;
494 queue->copy_range = range;
501 spin_unlock_bh(&queue->lock);
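/*
 * Match helper for nfqnl_flush(): true if the entry's input or output
 * device (or, with bridge netfilter, its physical in/out device) has the
 * given ifindex.
 */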
507 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
510 if (entry->indev->ifindex == ifindex)
513 if (entry->outdev->ifindex == ifindex)
515 #ifdef CONFIG_BRIDGE_NETFILTER
516 if (entry->skb->nf_bridge) {
517 if (entry->skb->nf_bridge->physindev &&
518 entry->skb->nf_bridge->physindev->ifindex == ifindex)
520 if (entry->skb->nf_bridge->physoutdev &&
521 entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
528 /* drop all packets with either indev or outdev == ifindex from all queue instances */
531 nfqnl_dev_drop(int ifindex)
537 for (i = 0; i < INSTANCE_BUCKETS; i++) {
538 struct hlist_node *tmp;
539 struct nfqnl_instance *inst;
540 struct hlist_head *head = &instance_table[i];
542 hlist_for_each_entry_rcu(inst, tmp, head, hlist)
543 nfqnl_flush(inst, dev_cmp, ifindex);
549 #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
552 nfqnl_rcv_dev_event(struct notifier_block *this,
553 unsigned long event, void *ptr)
555 struct net_device *dev = ptr;
557 if (dev->nd_net != &init_net)
560 /* Drop any packets associated with the downed device */
561 if (event == NETDEV_DOWN)
562 nfqnl_dev_drop(dev->ifindex);
566 static struct notifier_block nfqnl_dev_notifier = {
567 .notifier_call = nfqnl_rcv_dev_event,
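/*
 * When a NETLINK_NETFILTER socket is released (NETLINK_URELEASE), destroy
 * every queue instance owned by that pid so packets are not left queued
 * for a listener that has gone away.
 */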
571 nfqnl_rcv_nl_event(struct notifier_block *this,
572 unsigned long event, void *ptr)
574 struct netlink_notify *n = ptr;
576 if (event == NETLINK_URELEASE &&
577 n->protocol == NETLINK_NETFILTER && n->pid) {
580 /* destroy all instances for this pid */
581 spin_lock(&instances_lock);
582 for (i = 0; i < INSTANCE_BUCKETS; i++) {
583 struct hlist_node *tmp, *t2;
584 struct nfqnl_instance *inst;
585 struct hlist_head *head = &instance_table[i];
587 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
588 if ((n->net == &init_net) &&
589 (n->pid == inst->peer_pid))
590 __instance_destroy(inst);
593 spin_unlock(&instances_lock);
598 static struct notifier_block nfqnl_rtnl_notifier = {
599 .notifier_call = nfqnl_rcv_nl_event,
602 static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
603 [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
604 [NFQA_MARK] = { .type = NLA_U32 },
605 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
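/*
 * Handle NFQNL_MSG_VERDICT: find and unlink the entry matching the
 * acknowledged packet id, optionally apply a new payload (NFQA_PAYLOAD)
 * and mark (NFQA_MARK), then hand the packet back to the stack with
 * nf_reinject().
 */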
609 nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
610 struct nlmsghdr *nlh, struct nlattr *nfqa[])
612 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
613 u_int16_t queue_num = ntohs(nfmsg->res_id);
615 struct nfqnl_msg_verdict_hdr *vhdr;
616 struct nfqnl_instance *queue;
617 unsigned int verdict;
618 struct nf_queue_entry *entry;
622 queue = instance_lookup(queue_num);
628 if (queue->peer_pid != NETLINK_CB(skb).pid) {
633 if (!nfqa[NFQA_VERDICT_HDR]) {
638 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
639 verdict = ntohl(vhdr->verdict);
641 if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
646 entry = find_dequeue_entry(queue, ntohl(vhdr->id));
653 if (nfqa[NFQA_PAYLOAD]) {
654 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
655 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
660 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
662 nf_reinject(entry, verdict);
671 nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
672 struct nlmsghdr *nlh, struct nlattr *nfqa[])
677 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
678 [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
679 [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
682 static const struct nf_queue_handler nfqh = {
684 .outfn = &nfqnl_enqueue_packet,
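/*
 * Handle NFQNL_MSG_CONFIG: PF_BIND/PF_UNBIND (un)register nfqh as the
 * queue handler for a protocol family, BIND/UNBIND create or destroy the
 * instance for the sending socket, and NFQA_CFG_PARAMS /
 * NFQA_CFG_QUEUE_MAXLEN adjust the copy mode and maximum queue length.
 */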
688 nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
689 struct nlmsghdr *nlh, struct nlattr *nfqa[])
691 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
692 u_int16_t queue_num = ntohs(nfmsg->res_id);
693 struct nfqnl_instance *queue;
694 struct nfqnl_msg_config_cmd *cmd = NULL;
697 if (nfqa[NFQA_CFG_CMD]) {
698 cmd = nla_data(nfqa[NFQA_CFG_CMD]);
700 /* Commands without queue context - might sleep */
701 switch (cmd->command) {
702 case NFQNL_CFG_CMD_PF_BIND:
703 ret = nf_register_queue_handler(ntohs(cmd->pf),
706 case NFQNL_CFG_CMD_PF_UNBIND:
707 ret = nf_unregister_queue_handler(ntohs(cmd->pf),
719 queue = instance_lookup(queue_num);
720 if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
726 switch (cmd->command) {
727 case NFQNL_CFG_CMD_BIND:
732 queue = instance_create(queue_num, NETLINK_CB(skb).pid);
738 case NFQNL_CFG_CMD_UNBIND:
743 instance_destroy(queue);
745 case NFQNL_CFG_CMD_PF_BIND:
746 case NFQNL_CFG_CMD_PF_UNBIND:
754 if (nfqa[NFQA_CFG_PARAMS]) {
755 struct nfqnl_msg_config_params *params;
761 params = nla_data(nfqa[NFQA_CFG_PARAMS]);
762 nfqnl_set_mode(queue, params->copy_mode,
763 ntohl(params->copy_range));
766 if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
767 __be32 *queue_maxlen;
773 queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
774 spin_lock_bh(&queue->lock);
775 queue->queue_maxlen = ntohl(*queue_maxlen);
776 spin_unlock_bh(&queue->lock);
784 static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
785 [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp,
786 .attr_count = NFQA_MAX, },
787 [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict,
788 .attr_count = NFQA_MAX,
789 .policy = nfqa_verdict_policy },
790 [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config,
791 .attr_count = NFQA_CFG_MAX,
792 .policy = nfqa_cfg_policy },
795 static const struct nfnetlink_subsystem nfqnl_subsys = {
797 .subsys_id = NFNL_SUBSYS_QUEUE,
798 .cb_count = NFQNL_MSG_MAX,
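/*
 * /proc/net/netfilter/nfnetlink_queue: one line per instance showing the
 * queue number, peer pid, current queue length, copy mode and range, the
 * two drop counters, the next packet id and a constant 1.
 */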
802 #ifdef CONFIG_PROC_FS
807 static struct hlist_node *get_first(struct seq_file *seq)
809 struct iter_state *st = seq->private;
814 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
815 if (!hlist_empty(&instance_table[st->bucket]))
816 return instance_table[st->bucket].first;
821 static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
823 struct iter_state *st = seq->private;
827 if (++st->bucket >= INSTANCE_BUCKETS)
830 h = instance_table[st->bucket].first;
835 static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
837 struct hlist_node *head;
838 head = get_first(seq);
841 while (pos && (head = get_next(seq, head)))
843 return pos ? NULL : head;
846 static void *seq_start(struct seq_file *seq, loff_t *pos)
848 spin_lock(&instances_lock);
849 return get_idx(seq, *pos);
852 static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
855 return get_next(s, v);
858 static void seq_stop(struct seq_file *s, void *v)
860 spin_unlock(&instances_lock);
863 static int seq_show(struct seq_file *s, void *v)
865 const struct nfqnl_instance *inst = v;
867 return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
869 inst->peer_pid, inst->queue_total,
870 inst->copy_mode, inst->copy_range,
871 inst->queue_dropped, inst->queue_user_dropped,
872 inst->id_sequence, 1);
875 static const struct seq_operations nfqnl_seq_ops = {
882 static int nfqnl_open(struct inode *inode, struct file *file)
884 return seq_open_private(file, &nfqnl_seq_ops,
885 sizeof(struct iter_state));
888 static const struct file_operations nfqnl_file_ops = {
889 .owner = THIS_MODULE,
893 .release = seq_release_private,
898 static int __init nfnetlink_queue_init(void)
900 int i, status = -ENOMEM;
901 #ifdef CONFIG_PROC_FS
902 struct proc_dir_entry *proc_nfqueue;
905 for (i = 0; i < INSTANCE_BUCKETS; i++)
906 INIT_HLIST_HEAD(&instance_table[i]);
908 netlink_register_notifier(&nfqnl_rtnl_notifier);
909 status = nfnetlink_subsys_register(&nfqnl_subsys);
911 printk(KERN_ERR "nf_queue: failed to register nfnetlink subsystem\n");
912 goto cleanup_netlink_notifier;
915 #ifdef CONFIG_PROC_FS
916 proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
920 proc_nfqueue->proc_fops = &nfqnl_file_ops;
923 register_netdevice_notifier(&nfqnl_dev_notifier);
926 #ifdef CONFIG_PROC_FS
928 nfnetlink_subsys_unregister(&nfqnl_subsys);
930 cleanup_netlink_notifier:
931 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
935 static void __exit nfnetlink_queue_fini(void)
937 nf_unregister_queue_handlers(&nfqh);
938 unregister_netdevice_notifier(&nfqnl_dev_notifier);
939 #ifdef CONFIG_PROC_FS
940 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
942 nfnetlink_subsys_unregister(&nfqnl_subsys);
943 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
946 MODULE_DESCRIPTION("netfilter packet queue handler");
947 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
948 MODULE_LICENSE("GPL");
949 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
951 module_init(nfnetlink_queue_init);
952 module_exit(nfnetlink_queue_fini);