2 * NETLINK Kernel-user communication protocol.
4 * Authors: Alan Cox <alan@redhat.com>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
12 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13 * added netlink_proto_exit
17 #include <linux/config.h>
18 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/major.h>
23 #include <linux/signal.h>
24 #include <linux/sched.h>
25 #include <linux/errno.h>
26 #include <linux/string.h>
27 #include <linux/stat.h>
28 #include <linux/socket.h>
30 #include <linux/fcntl.h>
31 #include <linux/termios.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
35 #include <linux/slab.h>
36 #include <asm/uaccess.h>
37 #include <linux/skbuff.h>
38 #include <linux/netdevice.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/proc_fs.h>
41 #include <linux/smp_lock.h>
42 #include <linux/notifier.h>
48 #if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
49 #define NL_EMULATE_DEV
/* Fields of the per-socket netlink state (struct declaration itself is
 * outside this extract; it is reached elsewhere as sk->protinfo.af_netlink). */
59 int (*handler)(int unit, struct sk_buff *skb);	/* in-kernel delivery hook (NL_EMULATE_DEV path) */
60 wait_queue_head_t wait;	/* senders sleep here when the receive queue is full */
61 struct netlink_callback *cb;	/* active dump callback, guarded by cb_lock elsewhere */
63 void (*data_ready)(struct sock *sk, int bytes);	/* kernel-side input callback set by netlink_kernel_create() */
/* File-scope state: one singly linked socket list per netlink protocol,
 * protected by nl_table_lock plus the grab/ungrab exclusion scheme below. */
66 static struct sock *nl_table[MAX_LINKS];	/* per-protocol socket lists */
67 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);	/* sleepers in netlink_table_grab() */
68 static unsigned nl_nonroot[MAX_LINKS];	/* per-protocol NL_NONROOT_* permission bits */
71 static struct socket *netlink_kernel[MAX_LINKS];	/* NL_EMULATE_DEV back-compat kernel sockets */
74 static int netlink_dump(struct sock *sk);
75 static void netlink_destroy_callback(struct netlink_callback *cb);
77 atomic_t netlink_sock_nr;	/* count of live netlink sockets (refcnt debugging) */
79 static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
80 static atomic_t nl_table_users = ATOMIC_INIT(0);	/* readers inside netlink_lock_table() */
82 static struct notifier_block *netlink_chain;	/* NETLINK_URELEASE notifier chain */
/* sk->destruct hook: runs when the last reference to a netlink socket is
 * dropped.  Purges any undelivered skbs, sanity-checks that nothing is
 * still accounted or in progress, and frees the protinfo blob.
 * NOTE(review): several lines (braces, an sk->dead check around the
 * printk) appear elided in this extract. */
84 static void netlink_sock_destruct(struct sock *sk)
86 skb_queue_purge(&sk->receive_queue);
89 printk("Freeing alive netlink socket %p\n", sk);	/* destruct on a live socket is a bug */
92 BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
93 BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
94 BUG_TRAP(sk->protinfo.af_netlink->cb==NULL);	/* no dump may still be in flight */
96 kfree(sk->protinfo.af_netlink);
98 atomic_dec(&netlink_sock_nr);
99 #ifdef NETLINK_REFCNT_DEBUG
100 printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n", sk, atomic_read(&netlink_sock_nr));
104 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
105 * Look, when several writers sleep and reader wakes them up, all but one
106 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
107 * this, _but_ remember, it adds useless work on UP machines.
 */
/* Writer-side entry: take the table write lock, then sleep (exclusively)
 * until every reader that incremented nl_table_users has left.  The lock
 * is dropped while sleeping and re-taken before re-checking.
 * NOTE(review): the loop construct and break appear elided in this
 * extract; the visible lines are the retained loop body. */
110 static void netlink_table_grab(void)
112 write_lock_bh(&nl_table_lock);
114 if (atomic_read(&nl_table_users)) {
115 DECLARE_WAITQUEUE(wait, current);
117 add_wait_queue_exclusive(&nl_table_wait, &wait);	/* exclusive: wake one writer at a time */
119 set_current_state(TASK_UNINTERRUPTIBLE);
120 if (atomic_read(&nl_table_users) == 0)
122 write_unlock_bh(&nl_table_lock);	/* release before scheduling away */
124 write_lock_bh(&nl_table_lock);
127 __set_current_state(TASK_RUNNING);
128 remove_wait_queue(&nl_table_wait, &wait);
132 static __inline__ void netlink_table_ungrab(void)
134 write_unlock_bh(&nl_table_lock);
135 wake_up(&nl_table_wait);
138 static __inline__ void
139 netlink_lock_table(void)
141 /* read_lock() synchronizes us to netlink_table_grab */
143 read_lock(&nl_table_lock);
144 atomic_inc(&nl_table_users);
145 read_unlock(&nl_table_lock);
148 static __inline__ void
149 netlink_unlock_table(void)
151 if (atomic_dec_and_test(&nl_table_users))
152 wake_up(&nl_table_wait);
/* Find the socket bound to (protocol, pid) in nl_table.
 * NOTE(review): the sock_hold()/return statements inside the match arm
 * and the final return appear elided in this extract; the caller is
 * expected to receive a held reference on success, NULL otherwise. */
155 static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
159 read_lock(&nl_table_lock);
160 for (sk=nl_table[protocol]; sk; sk=sk->next) {
161 if (sk->protinfo.af_netlink->pid == pid) {
163 read_unlock(&nl_table_lock);
168 read_unlock(&nl_table_lock);
172 extern struct proto_ops netlink_ops;
/* Bind sk to pid and link it into nl_table[sk->protocol].
 * Returns 0 on success, -EADDRINUSE if another socket already owns the
 * pid (or, per the elided branch, if sk was already bound).  Runs under
 * the exclusive table grab so the scan + insert is atomic. */
174 static int netlink_insert(struct sock *sk, u32 pid)
176 int err = -EADDRINUSE;
179 netlink_table_grab();
180 for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
181 if (osk->protinfo.af_netlink->pid == pid)
186 if (sk->protinfo.af_netlink->pid == 0) {	/* only an unbound socket may take the pid */
187 sk->protinfo.af_netlink->pid = pid;
188 sk->next = nl_table[sk->protocol];	/* push at head of per-protocol list */
189 nl_table[sk->protocol] = sk;
194 netlink_table_ungrab();
/* Unlink sk from its per-protocol list under the exclusive table grab.
 * NOTE(review): the loop body that matches *skp == sk and splices it
 * out appears elided in this extract. */
198 static void netlink_remove(struct sock *sk)
202 netlink_table_grab();
203 for (skp = &nl_table[sk->protocol]; *skp; skp = &((*skp)->next)) {
210 netlink_table_ungrab();
/* PF_NETLINK socket() backend: validate type/protocol, allocate the
 * struct sock plus its netlink_opt protinfo, and wire up the destructor.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the sk_alloc failure check, the kmalloc-failure cleanup
 * body and the final return appear elided in this extract. */
213 static int netlink_create(struct socket *sock, int protocol)
217 sock->state = SS_UNCONNECTED;
219 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
220 return -ESOCKTNOSUPPORT;
222 if (protocol<0 || protocol >= MAX_LINKS)
223 return -EPROTONOSUPPORT;
225 sock->ops = &netlink_ops;
227 sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1);
231 sock_init_data(sock,sk);
233 sk->protinfo.af_netlink = kmalloc(sizeof(struct netlink_opt), GFP_KERNEL);
234 if (sk->protinfo.af_netlink == NULL) {
238 memset(sk->protinfo.af_netlink, 0, sizeof(struct netlink_opt));	/* zero all opt fields */
240 spin_lock_init(&sk->protinfo.af_netlink->cb_lock);
241 init_waitqueue_head(&sk->protinfo.af_netlink->wait);
242 sk->destruct = netlink_sock_destruct;	/* frees protinfo on last sock_put */
243 atomic_inc(&netlink_sock_nr);
245 sk->protocol=protocol;
/* close() backend: tear down any in-progress dump, unlink the socket,
 * purge queues and fire the NETLINK_URELEASE notifier for bound
 * unicast sockets so kernel users can clean up per-pid state.
 * NOTE(review): the netlink_remove()/sock_put() calls and the final
 * return appear elided in this extract. */
249 static int netlink_release(struct socket *sock)
251 struct sock *sk = sock->sk;
258 spin_lock(&sk->protinfo.af_netlink->cb_lock);
259 if (sk->protinfo.af_netlink->cb) {
260 sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);	/* let the dumper finalize */
261 netlink_destroy_callback(sk->protinfo.af_netlink->cb);
262 sk->protinfo.af_netlink->cb = NULL;
265 spin_unlock(&sk->protinfo.af_netlink->cb_lock);
267 /* OK. Socket is unlinked, and, therefore,
268 no new packets will arrive */
272 wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);	/* release blocked senders */
274 skb_queue_purge(&sk->write_queue);
276 if (sk->protinfo.af_netlink->pid && !sk->protinfo.af_netlink->groups) {
277 struct netlink_notify n = { protocol:sk->protocol,
278 pid:sk->protinfo.af_netlink->pid };
279 notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
/* Pick a pid for an unbound socket: start from current->pid and, on
 * collision, fall back to negative pid values (signed s32 on purpose).
 * NOTE(review): the retry loop structure and the collision-search body
 * appear elided in this extract. */
286 static int netlink_autobind(struct socket *sock)
288 struct sock *sk = sock->sk;
290 s32 pid = current->pid;	/* signed: negative range used for collision fallback */
294 netlink_table_grab();
295 for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
296 if (osk->protinfo.af_netlink->pid == pid) {
297 /* Bind collision, search negative pid values. */
301 netlink_table_ungrab();
305 netlink_table_ungrab();
307 err = netlink_insert(sk, pid);
308 if (err == -EADDRINUSE)	/* lost a race; presumably retried — confirm against full source */
310 sk->protinfo.af_netlink->groups = 0;	/* autobound sockets join no multicast groups */
314 static inline int netlink_capable(struct socket *sock, unsigned flag)
316 return (nl_nonroot[sock->sk->protocol] & flag) || capable(CAP_NET_ADMIN);
/* bind() backend: validate the sockaddr_nl, enforce that multicast
 * subscription requires privilege, then either re-set groups on an
 * already-bound socket or bind (auto or explicit) and set groups.
 * NOTE(review): several returns/braces between the visible branches
 * appear elided in this extract. */
319 static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
321 struct sock *sk = sock->sk;
323 struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
325 if (nladdr->nl_family != AF_NETLINK)
328 /* Only superuser is allowed to listen multicasts */
329 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
332 if (sk->protinfo.af_netlink->pid) {	/* already bound: pid must match, only groups change */
333 if (nladdr->nl_pid != sk->protinfo.af_netlink->pid)
335 sk->protinfo.af_netlink->groups = nladdr->nl_groups;
339 if (nladdr->nl_pid == 0) {	/* pid 0 requests kernel-assigned pid */
340 err = netlink_autobind(sock);
342 sk->protinfo.af_netlink->groups = nladdr->nl_groups;
346 err = netlink_insert(sk, nladdr->nl_pid);
348 sk->protinfo.af_netlink->groups = nladdr->nl_groups;
/* connect() backend: AF_UNSPEC disconnects (clears the default
 * destination); otherwise record dst_pid/dst_groups for subsequent
 * sends, autobinding first if the socket has no pid yet.  Sending to
 * multicast groups requires NL_NONROOT_SEND privilege. */
352 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
356 struct sock *sk = sock->sk;
357 struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;
359 if (addr->sa_family == AF_UNSPEC) {	/* disconnect */
360 sk->protinfo.af_netlink->dst_pid = 0;
361 sk->protinfo.af_netlink->dst_groups = 0;
364 if (addr->sa_family != AF_NETLINK)
367 /* Only superuser is allowed to send multicasts */
368 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
371 if (!sk->protinfo.af_netlink->pid)
372 err = netlink_autobind(sock);
375 sk->protinfo.af_netlink->dst_pid = nladdr->nl_pid;
376 sk->protinfo.af_netlink->dst_groups = nladdr->nl_groups;
/* getsockname()/getpeername() backend: fill a sockaddr_nl with either
 * the peer (dst_pid/dst_groups, when peer != 0) or the local binding.
 * NOTE(review): the if (peer) / else braces appear elided in this
 * extract; lines 391-392 are the peer branch, 394-395 the local one. */
382 static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
384 struct sock *sk = sock->sk;
385 struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
387 nladdr->nl_family = AF_NETLINK;
388 *addr_len = sizeof(*nladdr);
391 nladdr->nl_pid = sk->protinfo.af_netlink->dst_pid;
392 nladdr->nl_groups = sk->protinfo.af_netlink->dst_groups;
394 nladdr->nl_pid = sk->protinfo.af_netlink->pid;
395 nladdr->nl_groups = sk->protinfo.af_netlink->groups;
/* Mark the receiver as congested (bit 0 of state) exactly once and
 * report the error to the owner.  NOTE(review): an sk->err assignment
 * (presumably ENOBUFS) appears elided between lines 402 and 404 —
 * confirm against the full source. */
400 static void netlink_overrun(struct sock *sk)
402 if (!test_and_set_bit(0, &sk->protinfo.af_netlink->state)) {	/* only first overrun reports */
404 sk->error_report(sk);
/* Deliver skb to the socket bound to (ssk->protocol, pid).
 * Kernel sockets with an NL_EMULATE_DEV handler are called directly;
 * otherwise the skb is queued, blocking (up to the send timeout) while
 * the receiver is over its rcvbuf or congested.  Kernel senders
 * (pid 0) never block — the message is dropped as an overrun instead.
 * Returns bytes delivered, or a negative errno (-ECONNREFUSED when no
 * such pid, sock_intr_errno on signal).
 * NOTE(review): sock_put() calls, the retry goto and several braces
 * appear elided in this extract. */
408 int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
412 int protocol = ssk->protocol;
414 DECLARE_WAITQUEUE(wait, current);
416 timeo = sock_sndtimeo(ssk, nonblock);
419 sk = netlink_lookup(protocol, pid);	/* takes a reference on success */
423 #ifdef NL_EMULATE_DEV
424 if (sk->protinfo.af_netlink->handler) {	/* synchronous kernel-side delivery */
426 len = sk->protinfo.af_netlink->handler(protocol, skb);
432 if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
433 test_bit(0, &sk->protinfo.af_netlink->state)) {	/* receiver full or congested */
435 if (ssk->protinfo.af_netlink->pid == 0)	/* kernel sender must not sleep */
442 __set_current_state(TASK_INTERRUPTIBLE);
443 add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
445 if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
446 test_bit(0, &sk->protinfo.af_netlink->state)) &&
448 timeo = schedule_timeout(timeo);	/* re-checked under the condition above */
450 __set_current_state(TASK_RUNNING);
451 remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
454 if (signal_pending(current)) {
456 return sock_intr_errno(timeo);
462 skb_set_owner_r(skb, sk);	/* charge the skb to the receiver's rmem */
463 skb_queue_tail(&sk->receive_queue, skb);
464 sk->data_ready(sk, len);
470 return -ECONNREFUSED;	/* no socket bound to that pid */
/* Best-effort delivery of one broadcast skb to one subscriber: queue it
 * only if the receiver has rcvbuf headroom and is not congested.
 * NOTE(review): the return statements (success/failure) appear elided
 * in this extract; nonzero presumably signals delivery failure to the
 * caller, which then records an overrun — confirm against full source. */
473 static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
475 #ifdef NL_EMULATE_DEV
476 if (sk->protinfo.af_netlink->handler) {	/* kernel-side subscriber: call directly */
478 sk->protinfo.af_netlink->handler(sk->protocol, skb);
482 if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
483 !test_bit(0, &sk->protinfo.af_netlink->state)) {
485 skb_set_owner_r(skb, sk);
486 skb_queue_tail(&sk->receive_queue, skb);
487 sk->data_ready(sk, skb->len);
/* Send skb to every socket of this protocol subscribed to any bit of
 * `group`, except the one bound to `pid` (normally the sender).  The
 * skb is cloned per recipient when shared; allocation uses the caller's
 * `allocation` flags.  The socket list is pinned with the reader-side
 * table lock so grab-side writers are excluded while we may sleep in
 * skb_clone().
 * NOTE(review): sock_hold/sock_put, failure accounting and the
 * clone-failure notification body appear elided in this extract. */
493 void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
494 u32 group, int allocation)
497 struct sk_buff *skb2 = NULL;
498 int protocol = ssk->protocol;
501 /* While we sleep in clone, do not allow to change socket list */
503 netlink_lock_table();
505 for (sk = nl_table[protocol]; sk; sk = sk->next) {
509 if (sk->protinfo.af_netlink->pid == pid ||
510 !(sk->protinfo.af_netlink->groups&group))	/* skip sender and non-subscribers */
520 if (atomic_read(&skb->users) != 1) {	/* shared: must clone before queueing */
521 skb2 = skb_clone(skb, allocation);
524 atomic_inc(&skb->users);	/* sole owner: hand out an extra reference instead */
529 /* Clone failed. Notify ALL listeners. */
531 } else if (netlink_broadcast_deliver(sk, skb2)) {
538 netlink_unlock_table();
/* Report error `code` to every socket of this protocol subscribed to
 * `group`, except the one bound to `pid`.  NOTE(review): the sk->err
 * assignment from `code` appears elided between lines 556 and 560 —
 * confirm against the full source. */
545 void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
548 int protocol = ssk->protocol;
550 read_lock(&nl_table_lock);
551 for (sk = nl_table[protocol]; sk; sk = sk->next) {
555 if (sk->protinfo.af_netlink->pid == pid ||
556 !(sk->protinfo.af_netlink->groups&group))	/* skip sender and non-subscribers */
560 sk->error_report(sk);
562 read_unlock(&nl_table_lock);
/* sendmsg() backend: resolve the destination (msg_name overrides the
 * connected default), autobind if needed, build an skb carrying the
 * sender's identity and effective capabilities in its control block,
 * copy in the payload, then unicast or broadcast it.
 * NOTE(review): several error returns, the skb==NULL check and the
 * final return appear elided in this extract. */
565 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, int len,
566 struct scm_cookie *scm)
568 struct sock *sk = sock->sk;
569 struct sockaddr_nl *addr=msg->msg_name;
575 if (msg->msg_flags&MSG_OOB)	/* netlink has no out-of-band data */
578 if (msg->msg_namelen) {	/* explicit destination supplied */
579 if (addr->nl_family != AF_NETLINK)
581 dst_pid = addr->nl_pid;
582 dst_groups = addr->nl_groups;
583 if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))	/* multicast send is privileged */
586 dst_pid = sk->protinfo.af_netlink->dst_pid;
587 dst_groups = sk->protinfo.af_netlink->dst_groups;
590 if (!sk->protinfo.af_netlink->pid) {	/* sender must be bound before sending */
591 err = netlink_autobind(sock);
597 if ((unsigned)len > sk->sndbuf-32)	/* leave headroom below sndbuf */
600 skb = alloc_skb(len, GFP_KERNEL);
604 NETLINK_CB(skb).pid = sk->protinfo.af_netlink->pid;
605 NETLINK_CB(skb).groups = sk->protinfo.af_netlink->groups;
606 NETLINK_CB(skb).dst_pid = dst_pid;
607 NETLINK_CB(skb).dst_groups = dst_groups;
608 memcpy(NETLINK_CREDS(skb), &scm->creds, sizeof(struct ucred));
610 /* What can I do? Netlink is asynchronous, so that
611 we will have to save current capabilities to
612 check them, when this message will be delivered
613 to corresponding kernel module. --ANK (980802)
 */
615 NETLINK_CB(skb).eff_cap = current->cap_effective;
618 if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
624 atomic_inc(&skb->users);	/* broadcast consumes one ref; keep one for unicast below */
625 netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
627 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
/* recvmsg() backend: dequeue one datagram, copy (possibly truncated)
 * payload to the user iovec, fill in the sender address and SCM creds,
 * then — once rmem has drained below half of rcvbuf — continue any
 * pending dump and wake blocked senders.
 * NOTE(review): the copied/len truncation logic and some braces appear
 * elided in this extract. */
633 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, int len,
634 int flags, struct scm_cookie *scm)
636 struct sock *sk = sock->sk;
637 int noblock = flags&MSG_DONTWAIT;
647 skb = skb_recv_datagram(sk,flags,noblock,&err);
651 msg->msg_namelen = 0;
655 msg->msg_flags |= MSG_TRUNC;	/* payload larger than the user buffer */
659 skb->h.raw = skb->data;
660 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
663 struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name;
664 addr->nl_family = AF_NETLINK;
665 addr->nl_pid = NETLINK_CB(skb).pid;
666 addr->nl_groups = NETLINK_CB(skb).dst_groups;
667 msg->msg_namelen = sizeof(*addr);
670 scm->creds = *NETLINK_CREDS(skb);	/* pass sender credentials to the reader */
671 skb_free_datagram(sk, skb);
673 if (sk->protinfo.af_netlink->cb	/* dump in progress: refill once queue half-drains */
674 && atomic_read(&sk->rmem_alloc) <= sk->rcvbuf/2)
678 if (skb_queue_len(&sk->receive_queue) <= sk->rcvbuf/2) {
679 if (skb_queue_len(&sk->receive_queue) == 0)
680 clear_bit(0, &sk->protinfo.af_netlink->state);	/* congestion cleared */
681 if (!test_bit(0, &sk->protinfo.af_netlink->state))
682 wake_up_interruptible(&sk->protinfo.af_netlink->wait);	/* unblock senders */
684 return err ? : copied;	/* bytes copied unless an error occurred */
/* data_ready used by kernel-created netlink sockets: forward to the
 * input callback registered via netlink_kernel_create(), then apply the
 * same congestion-clear/wake-senders logic as netlink_recvmsg(). */
687 void netlink_data_ready(struct sock *sk, int len)
689 if (sk->protinfo.af_netlink->data_ready)
690 sk->protinfo.af_netlink->data_ready(sk, len);
692 if (skb_queue_len(&sk->receive_queue) <= sk->rcvbuf/2) {
693 if (skb_queue_len(&sk->receive_queue) == 0)
694 clear_bit(0, &sk->protinfo.af_netlink->state);	/* congestion cleared */
695 if (!test_bit(0, &sk->protinfo.af_netlink->state))
696 wake_up_interruptible(&sk->protinfo.af_netlink->wait);	/* unblock senders */
701 * We export these functions to other modules. They provide a
702 * complete set of kernel non-blocking support for message
/* Create an in-kernel netlink socket for protocol `unit`, bound to
 * pid 0, with `input` called on arriving data.  Exported for kernel
 * modules.  NOTE(review): the return type line, error-path cleanup and
 * the `sk = sock->sk` assignment appear elided in this extract. */
707 netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
712 if (unit<0 || unit>=MAX_LINKS)
715 if (!(sock = sock_alloc()))
718 sock->type = SOCK_RAW;
720 if (netlink_create(sock, unit) < 0) {
725 sk->data_ready = netlink_data_ready;	/* route input through our dispatcher */
727 sk->protinfo.af_netlink->data_ready = input;	/* input may be NULL (fire-and-forget sink) */
729 netlink_insert(sk, 0);	/* kernel side always owns pid 0 */
733 void netlink_set_nonroot(int protocol, unsigned flags)
735 if ((unsigned)protocol < MAX_LINKS)
736 nl_nonroot[protocol] = flags;
/* Free a netlink_callback and any skb it holds.
 * NOTE(review): the body is elided in this extract. */
739 static void netlink_destroy_callback(struct netlink_callback *cb)
/*
747 * It looks a bit ugly.
748 * It would be better to create kernel thread.
 */
/* Produce the next chunk of a multipart dump: allocate a receive-side
 * skb, run the registered dump callback into it, and queue it to the
 * socket.  A positive callback return means more data follows (skb is
 * queued as-is); zero or negative means done, so an NLMSG_DONE message
 * carrying the status is appended and the callback is torn down.
 * NOTE(review): error returns (alloc failure, cb == NULL) and some
 * braces appear elided in this extract. */
751 static int netlink_dump(struct sock *sk)
753 struct netlink_callback *cb;
755 struct nlmsghdr *nlh;
758 skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
762 spin_lock(&sk->protinfo.af_netlink->cb_lock);
764 cb = sk->protinfo.af_netlink->cb;
766 spin_unlock(&sk->protinfo.af_netlink->cb_lock);
771 len = cb->dump(skb, cb);	/* > 0: partial chunk, more to come */
774 spin_unlock(&sk->protinfo.af_netlink->cb_lock);
775 skb_queue_tail(&sk->receive_queue, skb);
776 sk->data_ready(sk, len);
780 nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
781 nlh->nlmsg_flags |= NLM_F_MULTI;
782 memcpy(NLMSG_DATA(nlh), &len, sizeof(len));	/* DONE payload carries the final status */
783 skb_queue_tail(&sk->receive_queue, skb);
784 sk->data_ready(sk, skb->len);
787 sk->protinfo.af_netlink->cb = NULL;	/* dump finished */
788 spin_unlock(&sk->protinfo.af_netlink->cb_lock);
790 netlink_destroy_callback(cb);
/* Begin a multipart dump in response to request `nlh` received on skb:
 * allocate and populate a netlink_callback, attach it to the requesting
 * socket (rejecting if a dump is already in flight), and kick off the
 * first chunk.  NOTE(review): the cb field assignments (dump/done/nlh/
 * skb), the EBUSY return and the trailing netlink_dump()/sock_put()
 * calls appear elided in this extract. */
795 int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
796 struct nlmsghdr *nlh,
797 int (*dump)(struct sk_buff *skb, struct netlink_callback*),
798 int (*done)(struct netlink_callback*))
800 struct netlink_callback *cb;
803 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
807 memset(cb, 0, sizeof(*cb));
811 atomic_inc(&skb->users);	/* cb keeps a reference to the request skb */
814 sk = netlink_lookup(ssk->protocol, NETLINK_CB(skb).pid);
816 netlink_destroy_callback(cb);
817 return -ECONNREFUSED;	/* requester vanished */
819 /* A dump is in progress... */
820 spin_lock(&sk->protinfo.af_netlink->cb_lock);
821 if (sk->protinfo.af_netlink->cb) {	/* only one dump per socket at a time */
822 spin_unlock(&sk->protinfo.af_netlink->cb_lock);
823 netlink_destroy_callback(cb);
827 sk->protinfo.af_netlink->cb = cb;
828 spin_unlock(&sk->protinfo.af_netlink->cb_lock);
/* Send an NLMSG_ERROR reply for request `nlh` back to its sender.
 * On error the full offending request is echoed in the payload; on
 * success (ack) only its nlmsghdr is.  NOTE(review): the branch
 * selecting between the two size computations, the alloc-failure path
 * and the errmsg->error assignment appear elided in this extract. */
834 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
837 struct nlmsghdr *rep;
838 struct nlmsgerr *errmsg;
842 size = NLMSG_SPACE(sizeof(struct nlmsgerr));	/* ack: header-only echo */
844 size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));	/* error: echo whole request */
846 skb = alloc_skb(size, GFP_KERNEL);
850 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
851 NLMSG_ERROR, sizeof(struct nlmsgerr));
852 errmsg = NLMSG_DATA(rep);
854 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
855 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
859 #ifdef NL_EMULATE_DEV
861 static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;
864 * Backward compatibility.
/* Backward-compat (NL_EMULATE_DEV): create a kernel socket for `unit`
 * and install a synchronous handler called on each delivered skb.
 * NOTE(review): the NULL-check on the created sock and the return
 * appear elided in this extract. */
867 int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
869 struct sock *sk = netlink_kernel_create(unit, NULL);
872 sk->protinfo.af_netlink->handler = function;
873 write_lock_bh(&nl_emu_lock);
874 netlink_kernel[unit] = sk->socket;	/* published for netlink_post()/netlink_detach() */
875 write_unlock_bh(&nl_emu_lock);
/* Backward-compat (NL_EMULATE_DEV): unpublish and release the kernel
 * socket created by netlink_attach().  NOTE(review): the sock_release()
 * call after dropping the lock appears elided in this extract. */
879 void netlink_detach(int unit)
883 write_lock_bh(&nl_emu_lock);
884 sock = netlink_kernel[unit];
885 netlink_kernel[unit] = NULL;	/* unpublish before releasing */
886 write_unlock_bh(&nl_emu_lock);
/* Backward-compat (NL_EMULATE_DEV): broadcast skb on `unit` to all
 * groups from the kernel socket, if one is attached.  A reference is
 * taken (sock_hold, elided here) so the lock can be dropped before the
 * potentially sleeping broadcast.  NOTE(review): the sock_hold/
 * sock_put pair, the success/failure returns and braces appear elided
 * in this extract. */
891 int netlink_post(int unit, struct sk_buff *skb)
895 read_lock(&nl_emu_lock);
896 sock = netlink_kernel[unit];
898 struct sock *sk = sock->sk;
899 memset(skb->cb, 0, sizeof(skb->cb));	/* clear NETLINK_CB: message originates from kernel */
901 read_unlock(&nl_emu_lock);
903 netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);	/* pid 0, all groups, atomic context */
908 read_unlock(&nl_emu_lock);	/* no socket attached for this unit */
915 #ifdef CONFIG_PROC_FS
/* /proc/net/netlink read handler: one header line, then one line per
 * socket across all protocols, honoring the offset/length windowing
 * protocol of read_proc.  NOTE(review): the pos/begin bookkeeping, the
 * trailing newline sprintf and the final length clamping appear elided
 * in this extract. */
916 static int netlink_read_proc(char *buffer, char **start, off_t offset,
917 int length, int *eof, void *data)
925 len+= sprintf(buffer,"sk Eth Pid Groups "
926 "Rmem Wmem Dump Locks\n");
928 for (i=0; i<MAX_LINKS; i++) {
929 read_lock(&nl_table_lock);	/* pin this protocol's list while formatting */
930 for (s = nl_table[i]; s; s = s->next) {
931 len+=sprintf(buffer+len,"%p %-3d %-6d %08x %-8d %-8d %p %d",
934 s->protinfo.af_netlink->pid,
935 s->protinfo.af_netlink->groups,
936 atomic_read(&s->rmem_alloc),
937 atomic_read(&s->wmem_alloc),
938 s->protinfo.af_netlink->cb,
939 atomic_read(&s->refcnt)
949 if(pos>offset+length) {	/* window filled: stop early */
950 read_unlock(&nl_table_lock);
954 read_unlock(&nl_table_lock);
959 *start=buffer+(offset-begin);	/* where the requested window begins */
969 int netlink_register_notifier(struct notifier_block *nb)
971 return notifier_chain_register(&netlink_chain, nb);
974 int netlink_unregister_notifier(struct notifier_block *nb)
976 return notifier_chain_unregister(&netlink_chain, nb);
/* proto_ops for PF_NETLINK sockets (GNU designated-initializer syntax).
 * Unsupported operations are wired to the sock_no_* stubs.
 * NOTE(review): family, bind, poll and mmap entries appear elided in
 * this extract. */
979 struct proto_ops netlink_ops = {
982 release: netlink_release,
984 connect: netlink_connect,
985 socketpair: sock_no_socketpair,
986 accept: sock_no_accept,
987 getname: netlink_getname,
989 ioctl: sock_no_ioctl,
990 listen: sock_no_listen,
991 shutdown: sock_no_shutdown,
992 setsockopt: sock_no_setsockopt,
993 getsockopt: sock_no_getsockopt,
994 sendmsg: netlink_sendmsg,
995 recvmsg: netlink_recvmsg,
997 sendpage: sock_no_sendpage,
/* Registered with sock_register() below so socket(PF_NETLINK, ...)
 * routes to netlink_create().  NOTE(review): the family/create
 * initializers are elided in this extract. */
1000 struct net_proto_family netlink_family_ops = {
/* Module/boot init: compile-time-ish check that netlink_skb_parms fits
 * in skb->cb, then register the protocol family and the /proc entry.
 * NOTE(review): the panic return and the final return 0 appear elided
 * in this extract. */
1005 static int __init netlink_proto_init(void)
1007 struct sk_buff *dummy_skb;	/* never dereferenced: only used for sizeof its cb */
1009 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) {
1010 printk(KERN_CRIT "netlink_init: panic\n");
1013 sock_register(&netlink_family_ops);
1014 #ifdef CONFIG_PROC_FS
1015 create_proc_read_entry("net/netlink", 0, 0, netlink_read_proc, NULL);
1020 static void __exit netlink_proto_exit(void)
1022 sock_unregister(PF_NETLINK);
1023 remove_proc_entry("net/netlink", NULL);
1026 module_init(netlink_proto_init);
1027 module_exit(netlink_proto_exit);