[NET]: Make /proc/net per network namespace
net/unix/af_unix.c
1 /*
2  * NET4:        Implementation of BSD Unix domain sockets.
3  *
4  * Authors:     Alan Cox, <alan.cox@linux.org>
5  *
6  *              This program is free software; you can redistribute it and/or
7  *              modify it under the terms of the GNU General Public License
8  *              as published by the Free Software Foundation; either version
9  *              2 of the License, or (at your option) any later version.
10  *
11  * Version:     $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12  *
13  * Fixes:
14  *              Linus Torvalds  :       Assorted bug cures.
15  *              Niibe Yutaka    :       async I/O support.
16  *              Carsten Paeth   :       PF_UNIX check, address fixes.
17  *              Alan Cox        :       Limit size of allocated blocks.
18  *              Alan Cox        :       Fixed the stupid socketpair bug.
19  *              Alan Cox        :       BSD compatibility fine tuning.
20  *              Alan Cox        :       Fixed a bug in connect when interrupted.
21  *              Alan Cox        :       Sorted out a proper draft version of
22  *                                      file descriptor passing hacked up from
23  *                                      Mike Shaver's work.
24  *              Marty Leisner   :       Fixes to fd passing
25  *              Nick Nevin      :       recvmsg bugfix.
26  *              Alan Cox        :       Started proper garbage collector
27  *              Heiko EiBfeldt  :       Missing verify_area check
28  *              Alan Cox        :       Started POSIXisms
29  *              Andreas Schwab  :       Replace inode by dentry for proper
30  *                                      reference counting
31  *              Kirk Petersen   :       Made this a module
32  *          Christoph Rohland   :       Elegant non-blocking accept/connect algorithm.
33  *                                      Lots of bug fixes.
34  *           Alexey Kuznetsov   :       Repaired (I hope) bugs introduced
35  *                                      by the above two patches.
36  *           Andrea Arcangeli   :       If possible we block in connect(2)
37  *                                      if the max backlog of the listen socket
38  *                                      has been reached. This won't break
39  *                                      old apps and it avoids a huge number
40  *                                      of hashed socks (for unix_gc()
41  *                                      performance reasons).
42  *                                      Security fix that limits the max
43  *                                      number of socks to 2*max_files and
44  *                                      the number of skbs queueable in the
45  *                                      dgram receiver.
46  *              Artur Skawina   :       Hash function optimizations
47  *           Alexey Kuznetsov   :       Full scale SMP. Lot of bugs are introduced 8)
48  *            Malcolm Beattie   :       Set peercred for socketpair
49  *           Michal Ostrowski   :       Module initialization cleanup.
50  *           Arnaldo C. Melo    :       Remove MOD_{INC,DEC}_USE_COUNT,
51  *                                      the core infrastructure is doing that
52  *                                      for all net proto families now (2.5.69+)
53  *
54  *
55  * Known differences from reference BSD that was tested:
56  *
57  *      [TO FIX]
58  *      ECONNREFUSED is not returned from one end of a connected() socket to the
59  *              other the moment one end closes.
60  *      fstat() doesn't return st_dev=0, and gives the blksize as the high water mark
61  *              and a fake inode identifier (nor the BSD first socket fstat twice bug).
62  *      [NOT TO FIX]
63  *      accept() returns a path name even if the connecting socket has closed
64  *              in the meantime (BSD loses the path and gives up).
65  *      accept() returns 0 length path for an unbound connector. BSD returns 16
66  *              and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67  *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
68  *      BSD af_unix apparently has connect forgetting to block properly.
69  *              (need to check this with the POSIX spec in detail)
70  *
71  * Differences from 2.0.0-11-... (ANK)
72  *      Bug fixes and improvements.
73  *              - client shutdown killed server socket.
74  *              - removed all useless cli/sti pairs.
75  *
76  *      Semantic changes/extensions.
77  *              - generic control message passing.
78  *              - SCM_CREDENTIALS control message.
79  *              - "Abstract" (not FS based) socket bindings.
80  *                Abstract names are sequences of bytes (not zero terminated)
81  *                starting with a 0 byte, so that this name space does not
82  *                intersect with BSD names.
83  */
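/*
 * For illustration only (the name "mysock" below is just an example):
 * a filesystem binding uses a NUL-terminated path, while an abstract
 * binding starts with a zero byte and its length is carried solely in
 * the address length passed to bind(2).  A user-space sketch:
 *
 *	struct sockaddr_un fs = { .sun_family = AF_UNIX,
 *				  .sun_path   = "/tmp/mysock" };
 *	bind(fd, (struct sockaddr *)&fs, sizeof(fs));
 *
 *	struct sockaddr_un ab = { .sun_family = AF_UNIX };
 *	memcpy(ab.sun_path, "\0mysock", 7);
 *	bind(fd, (struct sockaddr *)&ab,
 *	     offsetof(struct sockaddr_un, sun_path) + 7);
 *
 * The two name spaces cannot collide because no filesystem path begins
 * with a NUL byte.
 */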
84
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
95 #include <linux/un.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
112 #include <net/scm.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119
120 int sysctl_unix_max_dgram_qlen __read_mostly = 10;
121
122 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
123 static DEFINE_SPINLOCK(unix_table_lock);
124 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
125
126 #define unix_sockets_unbound    (&unix_socket_table[UNIX_HASH_SIZE])
127
128 #define UNIX_ABSTRACT(sk)       (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
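/*
 * Layout note: unix_socket_table has UNIX_HASH_SIZE hash chains for
 * bound sockets plus one extra chain (index UNIX_HASH_SIZE) holding
 * sockets that are not bound to any address yet.  A socket bound to a
 * filesystem name stores UNIX_HASH_SIZE in addr->hash and is hashed by
 * the inode number of the backing file instead (see unix_bind()), so
 * UNIX_ABSTRACT() is true only for abstract names.
 */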
129
130 static struct sock *first_unix_socket(int *i)
131 {
132         for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
133                 if (!hlist_empty(&unix_socket_table[*i]))
134                         return __sk_head(&unix_socket_table[*i]);
135         }
136         return NULL;
137 }
138
139 static struct sock *next_unix_socket(int *i, struct sock *s)
140 {
141         struct sock *next = sk_next(s);
142         /* More in this chain? */
143         if (next)
144                 return next;
145         /* Look for next non-empty chain. */
146         for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
147                 if (!hlist_empty(&unix_socket_table[*i]))
148                         return __sk_head(&unix_socket_table[*i]);
149         }
150         return NULL;
151 }
152
153 #define forall_unix_sockets(i, s) \
154         for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
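/*
 * The iterators above take no locks themselves; callers are expected to
 * hold unix_table_lock (or otherwise keep the table stable) while
 * walking it.
 */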
155
156 #ifdef CONFIG_SECURITY_NETWORK
157 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
158 {
159         memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
160 }
161
162 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
163 {
164         scm->secid = *UNIXSID(skb);
165 }
166 #else
167 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
168 { }
169
170 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
171 { }
172 #endif /* CONFIG_SECURITY_NETWORK */
173
174 /*
175  *  SMP locking strategy:
176  *    the hash table is protected by the spinlock unix_table_lock
177  *    each socket state is protected by a separate per-socket spinlock.
178  */
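/*
 * When two sockets have to be locked at once (datagram connect), the
 * one with the lower address is locked first (see
 * unix_state_double_lock() below), which keeps the lock order
 * consistent and avoids deadlock.
 */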
179
180 static inline unsigned unix_hash_fold(__wsum n)
181 {
182         unsigned hash = (__force unsigned)n;
183         hash ^= hash>>16;
184         hash ^= hash>>8;
185         return hash&(UNIX_HASH_SIZE-1);
186 }
187
188 #define unix_peer(sk) (unix_sk(sk)->peer)
189
190 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
191 {
192         return unix_peer(osk) == sk;
193 }
194
195 static inline int unix_may_send(struct sock *sk, struct sock *osk)
196 {
197         return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
198 }
199
200 static struct sock *unix_peer_get(struct sock *s)
201 {
202         struct sock *peer;
203
204         unix_state_lock(s);
205         peer = unix_peer(s);
206         if (peer)
207                 sock_hold(peer);
208         unix_state_unlock(s);
209         return peer;
210 }
211
212 static inline void unix_release_addr(struct unix_address *addr)
213 {
214         if (atomic_dec_and_test(&addr->refcnt))
215                 kfree(addr);
216 }
217
218 /*
219  *      Check unix socket name:
220  *              - it should not be zero length.
221  *              - if it does not start with a zero byte, it should be NUL terminated (FS object)
222  *              - if it starts with a zero byte, it is an abstract name.
223  */
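/*
 *      Return value: the (possibly adjusted) length on success, -EINVAL
 *      otherwise.  For filesystem names the path is NUL terminated in
 *      the kernel copy and the length recomputed; *hashp is only filled
 *      in for abstract names, filesystem sockets are hashed by inode
 *      number later on.
 */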
224
225 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
226 {
227         if (len <= sizeof(short) || len > sizeof(*sunaddr))
228                 return -EINVAL;
229         if (!sunaddr || sunaddr->sun_family != AF_UNIX)
230                 return -EINVAL;
231         if (sunaddr->sun_path[0]) {
232                 /*
233                  * This may look like an off by one error but it is a bit more
234                  * subtle. 108 is the longest valid AF_UNIX path for a binding.
235                  * sun_path[108] doesn't as such exist.  However, in kernel space
236                  * we are guaranteed that it is a valid memory location in our
237                  * kernel address buffer.
238                  */
239                 ((char *)sunaddr)[len]=0;
240                 len = strlen(sunaddr->sun_path)+1+sizeof(short);
241                 return len;
242         }
243
244         *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
245         return len;
246 }
247
248 static void __unix_remove_socket(struct sock *sk)
249 {
250         sk_del_node_init(sk);
251 }
252
253 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
254 {
255         BUG_TRAP(sk_unhashed(sk));
256         sk_add_node(sk, list);
257 }
258
259 static inline void unix_remove_socket(struct sock *sk)
260 {
261         spin_lock(&unix_table_lock);
262         __unix_remove_socket(sk);
263         spin_unlock(&unix_table_lock);
264 }
265
266 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
267 {
268         spin_lock(&unix_table_lock);
269         __unix_insert_socket(list, sk);
270         spin_unlock(&unix_table_lock);
271 }
272
273 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
274                                               int len, int type, unsigned hash)
275 {
276         struct sock *s;
277         struct hlist_node *node;
278
279         sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
280                 struct unix_sock *u = unix_sk(s);
281
282                 if (u->addr->len == len &&
283                     !memcmp(u->addr->name, sunname, len))
284                         goto found;
285         }
286         s = NULL;
287 found:
288         return s;
289 }
290
291 static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
292                                                    int len, int type,
293                                                    unsigned hash)
294 {
295         struct sock *s;
296
297         spin_lock(&unix_table_lock);
298         s = __unix_find_socket_byname(sunname, len, type, hash);
299         if (s)
300                 sock_hold(s);
301         spin_unlock(&unix_table_lock);
302         return s;
303 }
304
305 static struct sock *unix_find_socket_byinode(struct inode *i)
306 {
307         struct sock *s;
308         struct hlist_node *node;
309
310         spin_lock(&unix_table_lock);
311         sk_for_each(s, node,
312                     &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
313                 struct dentry *dentry = unix_sk(s)->dentry;
314
315                 if(dentry && dentry->d_inode == i)
316                 {
317                         sock_hold(s);
318                         goto found;
319                 }
320         }
321         s = NULL;
322 found:
323         spin_unlock(&unix_table_lock);
324         return s;
325 }
326
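/* A unix socket counts as writable while no more than a quarter of its
 * send buffer is taken up by queued write memory. */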
327 static inline int unix_writable(struct sock *sk)
328 {
329         return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
330 }
331
332 static void unix_write_space(struct sock *sk)
333 {
334         read_lock(&sk->sk_callback_lock);
335         if (unix_writable(sk)) {
336                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
337                         wake_up_interruptible(sk->sk_sleep);
338                 sk_wake_async(sk, 2, POLL_OUT);
339         }
340         read_unlock(&sk->sk_callback_lock);
341 }
342
343 /* When a dgram socket disconnects (or changes its peer), we clear its receive
344  * queue of packets that arrived from the previous peer. First, this allows us to
345  * do flow control based only on wmem_alloc; second, an sk connected to a peer
346  * may receive messages only from that peer. */
347 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
348 {
349         if (!skb_queue_empty(&sk->sk_receive_queue)) {
350                 skb_queue_purge(&sk->sk_receive_queue);
351                 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
352
353                 /* If one link of a bidirectional dgram pipe is disconnected,
354                  * we signal an error. Messages are lost. Do not do this
355                  * when the peer was not connected to us.
356                  */
357                 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
358                         other->sk_err = ECONNRESET;
359                         other->sk_error_report(other);
360                 }
361         }
362 }
363
364 static void unix_sock_destructor(struct sock *sk)
365 {
366         struct unix_sock *u = unix_sk(sk);
367
368         skb_queue_purge(&sk->sk_receive_queue);
369
370         BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
371         BUG_TRAP(sk_unhashed(sk));
372         BUG_TRAP(!sk->sk_socket);
373         if (!sock_flag(sk, SOCK_DEAD)) {
374                 printk("Attempt to release alive unix socket: %p\n", sk);
375                 return;
376         }
377
378         if (u->addr)
379                 unix_release_addr(u->addr);
380
381         atomic_dec(&unix_nr_socks);
382 #ifdef UNIX_REFCNT_DEBUG
383         printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
384 #endif
385 }
386
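/*
 * Core of close()/release: unhash the socket, orphan it and mark it
 * dead, wake up and signal the peer (stream/seqpacket peers get
 * ECONNRESET if data was still queued or if this was an unaccepted
 * embryo), flush the receive queue and drop the dentry/vfsmount
 * references of a filesystem binding.  "embrion" is set when discarding
 * embryo sockets still sitting on a listening queue.
 */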
387 static int unix_release_sock (struct sock *sk, int embrion)
388 {
389         struct unix_sock *u = unix_sk(sk);
390         struct dentry *dentry;
391         struct vfsmount *mnt;
392         struct sock *skpair;
393         struct sk_buff *skb;
394         int state;
395
396         unix_remove_socket(sk);
397
398         /* Clear state */
399         unix_state_lock(sk);
400         sock_orphan(sk);
401         sk->sk_shutdown = SHUTDOWN_MASK;
402         dentry       = u->dentry;
403         u->dentry    = NULL;
404         mnt          = u->mnt;
405         u->mnt       = NULL;
406         state = sk->sk_state;
407         sk->sk_state = TCP_CLOSE;
408         unix_state_unlock(sk);
409
410         wake_up_interruptible_all(&u->peer_wait);
411
412         skpair=unix_peer(sk);
413
414         if (skpair!=NULL) {
415                 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
416                         unix_state_lock(skpair);
417                         /* No more writes */
418                         skpair->sk_shutdown = SHUTDOWN_MASK;
419                         if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
420                                 skpair->sk_err = ECONNRESET;
421                         unix_state_unlock(skpair);
422                         skpair->sk_state_change(skpair);
423                         read_lock(&skpair->sk_callback_lock);
424                         sk_wake_async(skpair,1,POLL_HUP);
425                         read_unlock(&skpair->sk_callback_lock);
426                 }
427                 sock_put(skpair); /* It may now die */
428                 unix_peer(sk) = NULL;
429         }
430
431         /* Try to flush out this socket. Throw out buffers at least */
432
433         while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
434                 if (state==TCP_LISTEN)
435                         unix_release_sock(skb->sk, 1);
436                 /* passed fds are erased in the kfree_skb hook        */
437                 kfree_skb(skb);
438         }
439
440         if (dentry) {
441                 dput(dentry);
442                 mntput(mnt);
443         }
444
445         sock_put(sk);
446
447         /* ---- Socket is dead now and most probably destroyed ---- */
448
449         /*
450          * Fixme: BSD difference: In BSD all sockets connected to us get
451          *        ECONNRESET and we die on the spot. In Linux we behave
452          *        like files and pipes do and wait for the last
453          *        dereference.
454          *
455          * Can't we simply set sock->err?
456          *
457          *        What does the above comment talk about? --ANK(980817)
458          */
459
460         if (atomic_read(&unix_tot_inflight))
461                 unix_gc();              /* Garbage collect fds */
462
463         return 0;
464 }
465
466 static int unix_listen(struct socket *sock, int backlog)
467 {
468         int err;
469         struct sock *sk = sock->sk;
470         struct unix_sock *u = unix_sk(sk);
471
472         err = -EOPNOTSUPP;
473         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
474                 goto out;                       /* Only stream/seqpacket sockets accept */
475         err = -EINVAL;
476         if (!u->addr)
477                 goto out;                       /* No listens on an unbound socket */
478         unix_state_lock(sk);
479         if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
480                 goto out_unlock;
481         if (backlog > sk->sk_max_ack_backlog)
482                 wake_up_interruptible_all(&u->peer_wait);
483         sk->sk_max_ack_backlog  = backlog;
484         sk->sk_state            = TCP_LISTEN;
485         /* set credentials so connect can copy them */
486         sk->sk_peercred.pid     = current->tgid;
487         sk->sk_peercred.uid     = current->euid;
488         sk->sk_peercred.gid     = current->egid;
489         err = 0;
490
491 out_unlock:
492         unix_state_unlock(sk);
493 out:
494         return err;
495 }
496
497 static int unix_release(struct socket *);
498 static int unix_bind(struct socket *, struct sockaddr *, int);
499 static int unix_stream_connect(struct socket *, struct sockaddr *,
500                                int addr_len, int flags);
501 static int unix_socketpair(struct socket *, struct socket *);
502 static int unix_accept(struct socket *, struct socket *, int);
503 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
504 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
505 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
506 static int unix_shutdown(struct socket *, int);
507 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
508                                struct msghdr *, size_t);
509 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
510                                struct msghdr *, size_t, int);
511 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
512                               struct msghdr *, size_t);
513 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
514                               struct msghdr *, size_t, int);
515 static int unix_dgram_connect(struct socket *, struct sockaddr *,
516                               int, int);
517 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
518                                   struct msghdr *, size_t);
519
520 static const struct proto_ops unix_stream_ops = {
521         .family =       PF_UNIX,
522         .owner =        THIS_MODULE,
523         .release =      unix_release,
524         .bind =         unix_bind,
525         .connect =      unix_stream_connect,
526         .socketpair =   unix_socketpair,
527         .accept =       unix_accept,
528         .getname =      unix_getname,
529         .poll =         unix_poll,
530         .ioctl =        unix_ioctl,
531         .listen =       unix_listen,
532         .shutdown =     unix_shutdown,
533         .setsockopt =   sock_no_setsockopt,
534         .getsockopt =   sock_no_getsockopt,
535         .sendmsg =      unix_stream_sendmsg,
536         .recvmsg =      unix_stream_recvmsg,
537         .mmap =         sock_no_mmap,
538         .sendpage =     sock_no_sendpage,
539 };
540
541 static const struct proto_ops unix_dgram_ops = {
542         .family =       PF_UNIX,
543         .owner =        THIS_MODULE,
544         .release =      unix_release,
545         .bind =         unix_bind,
546         .connect =      unix_dgram_connect,
547         .socketpair =   unix_socketpair,
548         .accept =       sock_no_accept,
549         .getname =      unix_getname,
550         .poll =         datagram_poll,
551         .ioctl =        unix_ioctl,
552         .listen =       sock_no_listen,
553         .shutdown =     unix_shutdown,
554         .setsockopt =   sock_no_setsockopt,
555         .getsockopt =   sock_no_getsockopt,
556         .sendmsg =      unix_dgram_sendmsg,
557         .recvmsg =      unix_dgram_recvmsg,
558         .mmap =         sock_no_mmap,
559         .sendpage =     sock_no_sendpage,
560 };
561
562 static const struct proto_ops unix_seqpacket_ops = {
563         .family =       PF_UNIX,
564         .owner =        THIS_MODULE,
565         .release =      unix_release,
566         .bind =         unix_bind,
567         .connect =      unix_stream_connect,
568         .socketpair =   unix_socketpair,
569         .accept =       unix_accept,
570         .getname =      unix_getname,
571         .poll =         datagram_poll,
572         .ioctl =        unix_ioctl,
573         .listen =       unix_listen,
574         .shutdown =     unix_shutdown,
575         .setsockopt =   sock_no_setsockopt,
576         .getsockopt =   sock_no_getsockopt,
577         .sendmsg =      unix_seqpacket_sendmsg,
578         .recvmsg =      unix_dgram_recvmsg,
579         .mmap =         sock_no_mmap,
580         .sendpage =     sock_no_sendpage,
581 };
582
583 static struct proto unix_proto = {
584         .name     = "UNIX",
585         .owner    = THIS_MODULE,
586         .obj_size = sizeof(struct unix_sock),
587 };
588
589 /*
590  * AF_UNIX sockets do not interact with hardware, hence they
591  * don't trigger interrupts - so it's safe for them to have
592  * bh-unsafe locking for their sk_receive_queue.lock. Split off
593  * this special lock-class by reinitializing the spinlock key:
594  */
595 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
596
597 static struct sock * unix_create1(struct socket *sock)
598 {
599         struct sock *sk = NULL;
600         struct unix_sock *u;
601
602         if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
603                 goto out;
604
605         sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
606         if (!sk)
607                 goto out;
608
609         atomic_inc(&unix_nr_socks);
610
611         sock_init_data(sock,sk);
612         lockdep_set_class(&sk->sk_receive_queue.lock,
613                                 &af_unix_sk_receive_queue_lock_key);
614
615         sk->sk_write_space      = unix_write_space;
616         sk->sk_max_ack_backlog  = sysctl_unix_max_dgram_qlen;
617         sk->sk_destruct         = unix_sock_destructor;
618         u         = unix_sk(sk);
619         u->dentry = NULL;
620         u->mnt    = NULL;
621         spin_lock_init(&u->lock);
622         atomic_set(&u->inflight, 0);
623         INIT_LIST_HEAD(&u->link);
624         mutex_init(&u->readlock); /* single task reading lock */
625         init_waitqueue_head(&u->peer_wait);
626         unix_insert_socket(unix_sockets_unbound, sk);
627 out:
628         return sk;
629 }
630
631 static int unix_create(struct socket *sock, int protocol)
632 {
633         if (protocol && protocol != PF_UNIX)
634                 return -EPROTONOSUPPORT;
635
636         sock->state = SS_UNCONNECTED;
637
638         switch (sock->type) {
639         case SOCK_STREAM:
640                 sock->ops = &unix_stream_ops;
641                 break;
642                 /*
643                  *      Believe it or not, BSD has AF_UNIX, SOCK_RAW,
644                  *      though nothing uses it.
645                  */
646         case SOCK_RAW:
647                 sock->type=SOCK_DGRAM;
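                /* fall through */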
648         case SOCK_DGRAM:
649                 sock->ops = &unix_dgram_ops;
650                 break;
651         case SOCK_SEQPACKET:
652                 sock->ops = &unix_seqpacket_ops;
653                 break;
654         default:
655                 return -ESOCKTNOSUPPORT;
656         }
657
658         return unix_create1(sock) ? 0 : -ENOMEM;
659 }
660
661 static int unix_release(struct socket *sock)
662 {
663         struct sock *sk = sock->sk;
664
665         if (!sk)
666                 return 0;
667
668         sock->sk = NULL;
669
670         return unix_release_sock (sk, 0);
671 }
672
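/*
 * Autobind: give a socket that has no address yet an abstract name of
 * the form "\0xxxxx" (five hex digits from a global counter), retrying
 * until an unused name is found.  Used by bind() when only the address
 * family is supplied, and before connecting/sending on an unbound
 * socket that has SOCK_PASSCRED set.
 */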
673 static int unix_autobind(struct socket *sock)
674 {
675         struct sock *sk = sock->sk;
676         struct unix_sock *u = unix_sk(sk);
677         static u32 ordernum = 1;
678         struct unix_address * addr;
679         int err;
680
681         mutex_lock(&u->readlock);
682
683         err = 0;
684         if (u->addr)
685                 goto out;
686
687         err = -ENOMEM;
688         addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
689         if (!addr)
690                 goto out;
691
692         addr->name->sun_family = AF_UNIX;
693         atomic_set(&addr->refcnt, 1);
694
695 retry:
696         addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
697         addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
698
699         spin_lock(&unix_table_lock);
700         ordernum = (ordernum+1)&0xFFFFF;
701
702         if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
703                                       addr->hash)) {
704                 spin_unlock(&unix_table_lock);
705                 /* Sanity yield. It is an unusual case, but still... */
706                 if (!(ordernum&0xFF))
707                         yield();
708                 goto retry;
709         }
710         addr->hash ^= sk->sk_type;
711
712         __unix_remove_socket(sk);
713         u->addr = addr;
714         __unix_insert_socket(&unix_socket_table[addr->hash], sk);
715         spin_unlock(&unix_table_lock);
716         err = 0;
717
718 out:    mutex_unlock(&u->readlock);
719         return err;
720 }
721
722 static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
723                                     int type, unsigned hash, int *error)
724 {
725         struct sock *u;
726         struct nameidata nd;
727         int err = 0;
728
729         if (sunname->sun_path[0]) {
730                 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
731                 if (err)
732                         goto fail;
733                 err = vfs_permission(&nd, MAY_WRITE);
734                 if (err)
735                         goto put_fail;
736
737                 err = -ECONNREFUSED;
738                 if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
739                         goto put_fail;
740                 u=unix_find_socket_byinode(nd.dentry->d_inode);
741                 if (!u)
742                         goto put_fail;
743
744                 if (u->sk_type == type)
745                         touch_atime(nd.mnt, nd.dentry);
746
747                 path_release(&nd);
748
749                 err=-EPROTOTYPE;
750                 if (u->sk_type != type) {
751                         sock_put(u);
752                         goto fail;
753                 }
754         } else {
755                 err = -ECONNREFUSED;
756                 u=unix_find_socket_byname(sunname, len, type, hash);
757                 if (u) {
758                         struct dentry *dentry;
759                         dentry = unix_sk(u)->dentry;
760                         if (dentry)
761                                 touch_atime(unix_sk(u)->mnt, dentry);
762                 } else
763                         goto fail;
764         }
765         return u;
766
767 put_fail:
768         path_release(&nd);
769 fail:
770         *error=err;
771         return NULL;
772 }
773
774
775 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
776 {
777         struct sock *sk = sock->sk;
778         struct unix_sock *u = unix_sk(sk);
779         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
780         struct dentry * dentry = NULL;
781         struct nameidata nd;
782         int err;
783         unsigned hash;
784         struct unix_address *addr;
785         struct hlist_head *list;
786
787         err = -EINVAL;
788         if (sunaddr->sun_family != AF_UNIX)
789                 goto out;
790
791         if (addr_len==sizeof(short)) {
792                 err = unix_autobind(sock);
793                 goto out;
794         }
795
796         err = unix_mkname(sunaddr, addr_len, &hash);
797         if (err < 0)
798                 goto out;
799         addr_len = err;
800
801         mutex_lock(&u->readlock);
802
803         err = -EINVAL;
804         if (u->addr)
805                 goto out_up;
806
807         err = -ENOMEM;
808         addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
809         if (!addr)
810                 goto out_up;
811
812         memcpy(addr->name, sunaddr, addr_len);
813         addr->len = addr_len;
814         addr->hash = hash ^ sk->sk_type;
815         atomic_set(&addr->refcnt, 1);
816
817         if (sunaddr->sun_path[0]) {
818                 unsigned int mode;
819                 err = 0;
820                 /*
821                  * Get the parent directory, calculate the hash for last
822                  * component.
823                  */
824                 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
825                 if (err)
826                         goto out_mknod_parent;
827
828                 dentry = lookup_create(&nd, 0);
829                 err = PTR_ERR(dentry);
830                 if (IS_ERR(dentry))
831                         goto out_mknod_unlock;
832
833                 /*
834                  * All right, let's create it.
835                  */
836                 mode = S_IFSOCK |
837                        (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
838                 err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
839                 if (err)
840                         goto out_mknod_dput;
841                 mutex_unlock(&nd.dentry->d_inode->i_mutex);
842                 dput(nd.dentry);
843                 nd.dentry = dentry;
844
845                 addr->hash = UNIX_HASH_SIZE;
846         }
847
848         spin_lock(&unix_table_lock);
849
850         if (!sunaddr->sun_path[0]) {
851                 err = -EADDRINUSE;
852                 if (__unix_find_socket_byname(sunaddr, addr_len,
853                                               sk->sk_type, hash)) {
854                         unix_release_addr(addr);
855                         goto out_unlock;
856                 }
857
858                 list = &unix_socket_table[addr->hash];
859         } else {
860                 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
861                 u->dentry = nd.dentry;
862                 u->mnt    = nd.mnt;
863         }
864
865         err = 0;
866         __unix_remove_socket(sk);
867         u->addr = addr;
868         __unix_insert_socket(list, sk);
869
870 out_unlock:
871         spin_unlock(&unix_table_lock);
872 out_up:
873         mutex_unlock(&u->readlock);
874 out:
875         return err;
876
877 out_mknod_dput:
878         dput(dentry);
879 out_mknod_unlock:
880         mutex_unlock(&nd.dentry->d_inode->i_mutex);
881         path_release(&nd);
882 out_mknod_parent:
883         if (err==-EEXIST)
884                 err=-EADDRINUSE;
885         unix_release_addr(addr);
886         goto out_up;
887 }
888
889 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
890 {
891         if (unlikely(sk1 == sk2) || !sk2) {
892                 unix_state_lock(sk1);
893                 return;
894         }
895         if (sk1 < sk2) {
896                 unix_state_lock(sk1);
897                 unix_state_lock_nested(sk2);
898         } else {
899                 unix_state_lock(sk2);
900                 unix_state_lock_nested(sk1);
901         }
902 }
903
904 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
905 {
906         if (unlikely(sk1 == sk2) || !sk2) {
907                 unix_state_unlock(sk1);
908                 return;
909         }
910         unix_state_unlock(sk1);
911         unix_state_unlock(sk2);
912 }
913
914 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
915                               int alen, int flags)
916 {
917         struct sock *sk = sock->sk;
918         struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
919         struct sock *other;
920         unsigned hash;
921         int err;
922
923         if (addr->sa_family != AF_UNSPEC) {
924                 err = unix_mkname(sunaddr, alen, &hash);
925                 if (err < 0)
926                         goto out;
927                 alen = err;
928
929                 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
930                     !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
931                         goto out;
932
933 restart:
934                 other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
935                 if (!other)
936                         goto out;
937
938                 unix_state_double_lock(sk, other);
939
940                 /* Apparently VFS overslept socket death. Retry. */
941                 if (sock_flag(other, SOCK_DEAD)) {
942                         unix_state_double_unlock(sk, other);
943                         sock_put(other);
944                         goto restart;
945                 }
946
947                 err = -EPERM;
948                 if (!unix_may_send(sk, other))
949                         goto out_unlock;
950
951                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
952                 if (err)
953                         goto out_unlock;
954
955         } else {
956                 /*
957                  *      1003.1g breaking connected state with AF_UNSPEC
958                  */
959                 other = NULL;
960                 unix_state_double_lock(sk, other);
961         }
962
963         /*
964          * If it was connected, reconnect.
965          */
966         if (unix_peer(sk)) {
967                 struct sock *old_peer = unix_peer(sk);
968                 unix_peer(sk)=other;
969                 unix_state_double_unlock(sk, other);
970
971                 if (other != old_peer)
972                         unix_dgram_disconnected(sk, old_peer);
973                 sock_put(old_peer);
974         } else {
975                 unix_peer(sk)=other;
976                 unix_state_double_unlock(sk, other);
977         }
978         return 0;
979
980 out_unlock:
981         unix_state_double_unlock(sk, other);
982         sock_put(other);
983 out:
984         return err;
985 }
986
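/*
 * Put the caller to sleep (interruptibly, exclusively) on the peer's
 * peer_wait queue while the peer is still alive and its receive queue
 * is over its backlog limit.  Called with the peer's state lock held;
 * the lock is dropped before sleeping.  Returns the remaining timeout.
 */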
987 static long unix_wait_for_peer(struct sock *other, long timeo)
988 {
989         struct unix_sock *u = unix_sk(other);
990         int sched;
991         DEFINE_WAIT(wait);
992
993         prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
994
995         sched = !sock_flag(other, SOCK_DEAD) &&
996                 !(other->sk_shutdown & RCV_SHUTDOWN) &&
997                 (skb_queue_len(&other->sk_receive_queue) >
998                  other->sk_max_ack_backlog);
999
1000         unix_state_unlock(other);
1001
1002         if (sched)
1003                 timeo = schedule_timeout(timeo);
1004
1005         finish_wait(&u->peer_wait, &wait);
1006         return timeo;
1007 }
1008
1009 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1010                                int addr_len, int flags)
1011 {
1012         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1013         struct sock *sk = sock->sk;
1014         struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1015         struct sock *newsk = NULL;
1016         struct sock *other = NULL;
1017         struct sk_buff *skb = NULL;
1018         unsigned hash;
1019         int st;
1020         int err;
1021         long timeo;
1022
1023         err = unix_mkname(sunaddr, addr_len, &hash);
1024         if (err < 0)
1025                 goto out;
1026         addr_len = err;
1027
1028         if (test_bit(SOCK_PASSCRED, &sock->flags)
1029                 && !u->addr && (err = unix_autobind(sock)) != 0)
1030                 goto out;
1031
1032         timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1033
1034         /* First of all allocate resources.
1035            If we did it after the state was locked,
1036            we would have to recheck everything again anyway.
1037          */
1038
1039         err = -ENOMEM;
1040
1041         /* create new sock for complete connection */
1042         newsk = unix_create1(NULL);
1043         if (newsk == NULL)
1044                 goto out;
1045
1046         /* Allocate skb for sending to listening sock */
1047         skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1048         if (skb == NULL)
1049                 goto out;
1050
1051 restart:
1052         /*  Find listening sock. */
1053         other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
1054         if (!other)
1055                 goto out;
1056
1057         /* Latch state of peer */
1058         unix_state_lock(other);
1059
1060         /* Apparently VFS overslept socket death. Retry. */
1061         if (sock_flag(other, SOCK_DEAD)) {
1062                 unix_state_unlock(other);
1063                 sock_put(other);
1064                 goto restart;
1065         }
1066
1067         err = -ECONNREFUSED;
1068         if (other->sk_state != TCP_LISTEN)
1069                 goto out_unlock;
1070
1071         if (skb_queue_len(&other->sk_receive_queue) >
1072             other->sk_max_ack_backlog) {
1073                 err = -EAGAIN;
1074                 if (!timeo)
1075                         goto out_unlock;
1076
1077                 timeo = unix_wait_for_peer(other, timeo);
1078
1079                 err = sock_intr_errno(timeo);
1080                 if (signal_pending(current))
1081                         goto out;
1082                 sock_put(other);
1083                 goto restart;
1084         }
1085
1086         /* Latch our state.
1087
1088            It is a tricky place. We need to grab our own lock and cannot
1089            drop the lock on the peer. It is dangerous because deadlock is
1090            possible. The connect-to-self case and simultaneous
1091            connect attempts are eliminated by checking the socket
1092            state: other is TCP_LISTEN, and if sk were TCP_LISTEN too
1093            we would have caught that before trying to grab the lock.
1094
1095            Well, and we have to recheck the state after the socket is locked.
1096          */
1097         st = sk->sk_state;
1098
1099         switch (st) {
1100         case TCP_CLOSE:
1101                 /* This is ok... continue with connect */
1102                 break;
1103         case TCP_ESTABLISHED:
1104                 /* Socket is already connected */
1105                 err = -EISCONN;
1106                 goto out_unlock;
1107         default:
1108                 err = -EINVAL;
1109                 goto out_unlock;
1110         }
1111
1112         unix_state_lock_nested(sk);
1113
1114         if (sk->sk_state != st) {
1115                 unix_state_unlock(sk);
1116                 unix_state_unlock(other);
1117                 sock_put(other);
1118                 goto restart;
1119         }
1120
1121         err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1122         if (err) {
1123                 unix_state_unlock(sk);
1124                 goto out_unlock;
1125         }
1126
1127         /* The way is open! Quickly set all the necessary fields... */
1128
1129         sock_hold(sk);
1130         unix_peer(newsk)        = sk;
1131         newsk->sk_state         = TCP_ESTABLISHED;
1132         newsk->sk_type          = sk->sk_type;
1133         newsk->sk_peercred.pid  = current->tgid;
1134         newsk->sk_peercred.uid  = current->euid;
1135         newsk->sk_peercred.gid  = current->egid;
1136         newu = unix_sk(newsk);
1137         newsk->sk_sleep         = &newu->peer_wait;
1138         otheru = unix_sk(other);
1139
1140         /* copy address information from listening to new sock*/
1141         if (otheru->addr) {
1142                 atomic_inc(&otheru->addr->refcnt);
1143                 newu->addr = otheru->addr;
1144         }
1145         if (otheru->dentry) {
1146                 newu->dentry    = dget(otheru->dentry);
1147                 newu->mnt       = mntget(otheru->mnt);
1148         }
1149
1150         /* Set credentials */
1151         sk->sk_peercred = other->sk_peercred;
1152
1153         sock->state     = SS_CONNECTED;
1154         sk->sk_state    = TCP_ESTABLISHED;
1155         sock_hold(newsk);
1156
1157         smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
1158         unix_peer(sk)   = newsk;
1159
1160         unix_state_unlock(sk);
1161
1162         /* queue the connection skb and notify the listening sock */
1163         spin_lock(&other->sk_receive_queue.lock);
1164         __skb_queue_tail(&other->sk_receive_queue, skb);
1165         spin_unlock(&other->sk_receive_queue.lock);
1166         unix_state_unlock(other);
1167         other->sk_data_ready(other, 0);
1168         sock_put(other);
1169         return 0;
1170
1171 out_unlock:
1172         if (other)
1173                 unix_state_unlock(other);
1174
1175 out:
1176         if (skb)
1177                 kfree_skb(skb);
1178         if (newsk)
1179                 unix_release_sock(newsk, 0);
1180         if (other)
1181                 sock_put(other);
1182         return err;
1183 }
1184
1185 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1186 {
1187         struct sock *ska=socka->sk, *skb = sockb->sk;
1188
1189         /* Join our sockets back to back */
1190         sock_hold(ska);
1191         sock_hold(skb);
1192         unix_peer(ska)=skb;
1193         unix_peer(skb)=ska;
1194         ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
1195         ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1196         ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1197
1198         if (ska->sk_type != SOCK_DGRAM) {
1199                 ska->sk_state = TCP_ESTABLISHED;
1200                 skb->sk_state = TCP_ESTABLISHED;
1201                 socka->state  = SS_CONNECTED;
1202                 sockb->state  = SS_CONNECTED;
1203         }
1204         return 0;
1205 }
1206
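/*
 * Accept for stream/seqpacket sockets: unix_stream_connect() queues a
 * small skb whose ->sk points at the freshly created connection sock on
 * the listener's receive queue; accept() dequeues that skb and grafts
 * the sock onto the new socket.
 */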
1207 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1208 {
1209         struct sock *sk = sock->sk;
1210         struct sock *tsk;
1211         struct sk_buff *skb;
1212         int err;
1213
1214         err = -EOPNOTSUPP;
1215         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1216                 goto out;
1217
1218         err = -EINVAL;
1219         if (sk->sk_state != TCP_LISTEN)
1220                 goto out;
1221
1222         /* If socket state is TCP_LISTEN it cannot change (for now...),
1223          * so that no locks are necessary.
1224          */
1225
1226         skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1227         if (!skb) {
1228                 /* This means receive shutdown. */
1229                 if (err == 0)
1230                         err = -EINVAL;
1231                 goto out;
1232         }
1233
1234         tsk = skb->sk;
1235         skb_free_datagram(sk, skb);
1236         wake_up_interruptible(&unix_sk(sk)->peer_wait);
1237
1238         /* attach accepted sock to socket */
1239         unix_state_lock(tsk);
1240         newsock->state = SS_CONNECTED;
1241         sock_graft(tsk, newsock);
1242         unix_state_unlock(tsk);
1243         return 0;
1244
1245 out:
1246         return err;
1247 }
1248
1249
1250 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1251 {
1252         struct sock *sk = sock->sk;
1253         struct unix_sock *u;
1254         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1255         int err = 0;
1256
1257         if (peer) {
1258                 sk = unix_peer_get(sk);
1259
1260                 err = -ENOTCONN;
1261                 if (!sk)
1262                         goto out;
1263                 err = 0;
1264         } else {
1265                 sock_hold(sk);
1266         }
1267
1268         u = unix_sk(sk);
1269         unix_state_lock(sk);
1270         if (!u->addr) {
1271                 sunaddr->sun_family = AF_UNIX;
1272                 sunaddr->sun_path[0] = 0;
1273                 *uaddr_len = sizeof(short);
1274         } else {
1275                 struct unix_address *addr = u->addr;
1276
1277                 *uaddr_len = addr->len;
1278                 memcpy(sunaddr, addr->name, *uaddr_len);
1279         }
1280         unix_state_unlock(sk);
1281         sock_put(sk);
1282 out:
1283         return err;
1284 }
1285
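/*
 * SCM_RIGHTS handling: unix_attach_fds() marks every passed file as
 * being "in flight" for the garbage collector and points the skb
 * destructor at unix_destruct_fds(); unix_detach_fds() undoes that when
 * the skb is received, and unix_destruct_fds() cleans up descriptors in
 * skbs freed without ever being delivered.
 */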
1286 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1287 {
1288         int i;
1289
1290         scm->fp = UNIXCB(skb).fp;
1291         skb->destructor = sock_wfree;
1292         UNIXCB(skb).fp = NULL;
1293
1294         for (i=scm->fp->count-1; i>=0; i--)
1295                 unix_notinflight(scm->fp->fp[i]);
1296 }
1297
1298 static void unix_destruct_fds(struct sk_buff *skb)
1299 {
1300         struct scm_cookie scm;
1301         memset(&scm, 0, sizeof(scm));
1302         unix_detach_fds(&scm, skb);
1303
1304         /* Alas, it calls VFS */
1305         /* So fscking what? fput() had been SMP-safe since the last Summer */
1306         scm_destroy(&scm);
1307         sock_wfree(skb);
1308 }
1309
1310 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1311 {
1312         int i;
1313         for (i=scm->fp->count-1; i>=0; i--)
1314                 unix_inflight(scm->fp->fp[i]);
1315         UNIXCB(skb).fp = scm->fp;
1316         skb->destructor = unix_destruct_fds;
1317         scm->fp = NULL;
1318 }
1319
1320 /*
1321  *      Send AF_UNIX data.
1322  */
1323
1324 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1325                               struct msghdr *msg, size_t len)
1326 {
1327         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1328         struct sock *sk = sock->sk;
1329         struct unix_sock *u = unix_sk(sk);
1330         struct sockaddr_un *sunaddr=msg->msg_name;
1331         struct sock *other = NULL;
1332         int namelen = 0; /* fake GCC */
1333         int err;
1334         unsigned hash;
1335         struct sk_buff *skb;
1336         long timeo;
1337         struct scm_cookie tmp_scm;
1338
1339         if (NULL == siocb->scm)
1340                 siocb->scm = &tmp_scm;
1341         err = scm_send(sock, msg, siocb->scm);
1342         if (err < 0)
1343                 return err;
1344
1345         err = -EOPNOTSUPP;
1346         if (msg->msg_flags&MSG_OOB)
1347                 goto out;
1348
1349         if (msg->msg_namelen) {
1350                 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1351                 if (err < 0)
1352                         goto out;
1353                 namelen = err;
1354         } else {
1355                 sunaddr = NULL;
1356                 err = -ENOTCONN;
1357                 other = unix_peer_get(sk);
1358                 if (!other)
1359                         goto out;
1360         }
1361
1362         if (test_bit(SOCK_PASSCRED, &sock->flags)
1363                 && !u->addr && (err = unix_autobind(sock)) != 0)
1364                 goto out;
1365
1366         err = -EMSGSIZE;
1367         if (len > sk->sk_sndbuf - 32)
1368                 goto out;
1369
1370         skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1371         if (skb==NULL)
1372                 goto out;
1373
1374         memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1375         if (siocb->scm->fp)
1376                 unix_attach_fds(siocb->scm, skb);
1377         unix_get_secdata(siocb->scm, skb);
1378
1379         skb_reset_transport_header(skb);
1380         err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1381         if (err)
1382                 goto out_free;
1383
1384         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1385
1386 restart:
1387         if (!other) {
1388                 err = -ECONNRESET;
1389                 if (sunaddr == NULL)
1390                         goto out_free;
1391
1392                 other = unix_find_other(sunaddr, namelen, sk->sk_type,
1393                                         hash, &err);
1394                 if (other==NULL)
1395                         goto out_free;
1396         }
1397
1398         unix_state_lock(other);
1399         err = -EPERM;
1400         if (!unix_may_send(sk, other))
1401                 goto out_unlock;
1402
1403         if (sock_flag(other, SOCK_DEAD)) {
1404                 /*
1405                  *      Check with 1003.1g - what should
1406                  *      the datagram error be?
1407                  */
1408                 unix_state_unlock(other);
1409                 sock_put(other);
1410
1411                 err = 0;
1412                 unix_state_lock(sk);
1413                 if (unix_peer(sk) == other) {
1414                         unix_peer(sk)=NULL;
1415                         unix_state_unlock(sk);
1416
1417                         unix_dgram_disconnected(sk, other);
1418                         sock_put(other);
1419                         err = -ECONNREFUSED;
1420                 } else {
1421                         unix_state_unlock(sk);
1422                 }
1423
1424                 other = NULL;
1425                 if (err)
1426                         goto out_free;
1427                 goto restart;
1428         }
1429
1430         err = -EPIPE;
1431         if (other->sk_shutdown & RCV_SHUTDOWN)
1432                 goto out_unlock;
1433
1434         if (sk->sk_type != SOCK_SEQPACKET) {
1435                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1436                 if (err)
1437                         goto out_unlock;
1438         }
1439
1440         if (unix_peer(other) != sk &&
1441             (skb_queue_len(&other->sk_receive_queue) >
1442              other->sk_max_ack_backlog)) {
1443                 if (!timeo) {
1444                         err = -EAGAIN;
1445                         goto out_unlock;
1446                 }
1447
1448                 timeo = unix_wait_for_peer(other, timeo);
1449
1450                 err = sock_intr_errno(timeo);
1451                 if (signal_pending(current))
1452                         goto out_free;
1453
1454                 goto restart;
1455         }
1456
1457         skb_queue_tail(&other->sk_receive_queue, skb);
1458         unix_state_unlock(other);
1459         other->sk_data_ready(other, len);
1460         sock_put(other);
1461         scm_destroy(siocb->scm);
1462         return len;
1463
1464 out_unlock:
1465         unix_state_unlock(other);
1466 out_free:
1467         kfree_skb(skb);
1468 out:
1469         if (other)
1470                 sock_put(other);
1471         scm_destroy(siocb->scm);
1472         return err;
1473 }
1474
1475
1476 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1477                                struct msghdr *msg, size_t len)
1478 {
1479         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1480         struct sock *sk = sock->sk;
1481         struct sock *other = NULL;
1482         struct sockaddr_un *sunaddr=msg->msg_name;
1483         int err,size;
1484         struct sk_buff *skb;
1485         int sent=0;
1486         struct scm_cookie tmp_scm;
1487
1488         if (NULL == siocb->scm)
1489                 siocb->scm = &tmp_scm;
1490         err = scm_send(sock, msg, siocb->scm);
1491         if (err < 0)
1492                 return err;
1493
1494         err = -EOPNOTSUPP;
1495         if (msg->msg_flags&MSG_OOB)
1496                 goto out_err;
1497
1498         if (msg->msg_namelen) {
1499                 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1500                 goto out_err;
1501         } else {
1502                 sunaddr = NULL;
1503                 err = -ENOTCONN;
1504                 other = unix_peer(sk);
1505                 if (!other)
1506                         goto out_err;
1507         }
1508
1509         if (sk->sk_shutdown & SEND_SHUTDOWN)
1510                 goto pipe_err;
1511
1512         while(sent < len)
1513         {
1514                 /*
1515                  *      Optimisation for the fact that under 0.01% of X
1516                  *      messages typically need breaking up.
1517                  */
1518
1519                 size = len-sent;
1520
1521                 /* Keep two messages in the pipe so it schedules better */
1522                 if (size > ((sk->sk_sndbuf >> 1) - 64))
1523                         size = (sk->sk_sndbuf >> 1) - 64;
1524
1525                 if (size > SKB_MAX_ALLOC)
1526                         size = SKB_MAX_ALLOC;
1527
1528                 /*
1529                  *      Grab a buffer
1530                  */
1531
1532                 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1533
1534                 if (skb==NULL)
1535                         goto out_err;
1536
1537                 /*
1538                  *      If you pass two values to sock_alloc_send_skb
1539                  *      it tries to grab the large buffer with GFP_NOFS
1540                  *      (which can fail easily), and if that fails it grabs
1541                  *      the fallback size buffer, which is under a page and
1542                  *      will succeed. [Alan]
1543                  */
1544                 size = min_t(int, size, skb_tailroom(skb));
1545
1546                 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1547                 if (siocb->scm->fp)
1548                         unix_attach_fds(siocb->scm, skb);
1549
1550                 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1551                         kfree_skb(skb);
1552                         goto out_err;
1553                 }
1554
1555                 unix_state_lock(other);
1556
1557                 if (sock_flag(other, SOCK_DEAD) ||
1558                     (other->sk_shutdown & RCV_SHUTDOWN))
1559                         goto pipe_err_free;
1560
1561                 skb_queue_tail(&other->sk_receive_queue, skb);
1562                 unix_state_unlock(other);
1563                 other->sk_data_ready(other, size);
1564                 sent+=size;
1565         }
1566
1567         scm_destroy(siocb->scm);
1568         siocb->scm = NULL;
1569
1570         return sent;
1571
1572 pipe_err_free:
1573         unix_state_unlock(other);
1574         kfree_skb(skb);
1575 pipe_err:
1576         if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1577                 send_sig(SIGPIPE,current,0);
1578         err = -EPIPE;
1579 out_err:
1580         scm_destroy(siocb->scm);
1581         siocb->scm = NULL;
1582         return sent ? : err;
1583 }
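
The pipe_err path above is what user space observes when the peer of a connected stream socket has gone away: the write fails with EPIPE, and SIGPIPE is raised unless the caller passes MSG_NOSIGNAL. A minimal user-space sketch of that behaviour, using an ordinary socketpair() for brevity (illustrative only, not part of this file):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fds[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        close(fds[1]);          /* the peer goes away */

        /* Without MSG_NOSIGNAL this send would raise SIGPIPE; with it
         * the kernel's pipe_err path is reported as -EPIPE instead.  */
        if (send(fds[0], "hello", 5, MSG_NOSIGNAL) < 0)
                printf("send: %s\n", strerror(errno)); /* Broken pipe */

        close(fds[0]);
        return 0;
}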
1584
1585 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1586                                   struct msghdr *msg, size_t len)
1587 {
1588         int err;
1589         struct sock *sk = sock->sk;
1590
1591         err = sock_error(sk);
1592         if (err)
1593                 return err;
1594
1595         if (sk->sk_state != TCP_ESTABLISHED)
1596                 return -ENOTCONN;
1597
1598         if (msg->msg_namelen)
1599                 msg->msg_namelen = 0;
1600
1601         return unix_dgram_sendmsg(kiocb, sock, msg, len);
1602 }
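
unix_seqpacket_sendmsg() falls through to the datagram path once the socket is connected, which is why SOCK_SEQPACKET keeps message boundaries over a reliable connection. A small sketch of the visible effect (illustrative, again using socketpair()):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        char buf[64];
        ssize_t n;

        if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        /* Two sends stay two records; they are never coalesced. */
        send(fds[0], "first", 5, 0);
        send(fds[0], "second", 6, 0);

        n = recv(fds[1], buf, sizeof(buf), 0);
        printf("record 1: %zd bytes\n", n);     /* 5 */
        n = recv(fds[1], buf, sizeof(buf), 0);
        printf("record 2: %zd bytes\n", n);     /* 6 */

        close(fds[0]);
        close(fds[1]);
        return 0;
}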
1603
1604 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1605 {
1606         struct unix_sock *u = unix_sk(sk);
1607
1608         msg->msg_namelen = 0;
1609         if (u->addr) {
1610                 msg->msg_namelen = u->addr->len;
1611                 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1612         }
1613 }
1614
1615 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1616                               struct msghdr *msg, size_t size,
1617                               int flags)
1618 {
1619         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1620         struct scm_cookie tmp_scm;
1621         struct sock *sk = sock->sk;
1622         struct unix_sock *u = unix_sk(sk);
1623         int noblock = flags & MSG_DONTWAIT;
1624         struct sk_buff *skb;
1625         int err;
1626
1627         err = -EOPNOTSUPP;
1628         if (flags&MSG_OOB)
1629                 goto out;
1630
1631         msg->msg_namelen = 0;
1632
1633         mutex_lock(&u->readlock);
1634
1635         skb = skb_recv_datagram(sk, flags, noblock, &err);
1636         if (!skb)
1637                 goto out_unlock;
1638
1639         wake_up_interruptible(&u->peer_wait);
1640
1641         if (msg->msg_name)
1642                 unix_copy_addr(msg, skb->sk);
1643
1644         if (size > skb->len)
1645                 size = skb->len;
1646         else if (size < skb->len)
1647                 msg->msg_flags |= MSG_TRUNC;
1648
1649         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1650         if (err)
1651                 goto out_free;
1652
1653         if (!siocb->scm) {
1654                 siocb->scm = &tmp_scm;
1655                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1656         }
1657         siocb->scm->creds = *UNIXCREDS(skb);
1658         unix_set_secdata(siocb->scm, skb);
1659
1660         if (!(flags & MSG_PEEK))
1661         {
1662                 if (UNIXCB(skb).fp)
1663                         unix_detach_fds(siocb->scm, skb);
1664         }
1665         else
1666         {
1667                 /* It is questionable: on PEEK we could:
1668                    - not return fds: good, but too simple 8)
1669                    - return fds, but not return them again on the later
1670                      read (old strategy, apparently wrong)
1671                    - clone fds (chosen here as the most universal
1672                      solution)
1673
1674                    POSIX 1003.1g does not actually define this clearly
1675                    at all. POSIX 1003.1g doesn't define a lot of things
1676                    clearly however!
1677
1678                 */
1679                 if (UNIXCB(skb).fp)
1680                         siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1681         }
1682         err = size;
1683
1684         scm_recv(sock, msg, siocb->scm, flags);
1685
1686 out_free:
1687         skb_free_datagram(sk,skb);
1688 out_unlock:
1689         mutex_unlock(&u->readlock);
1690 out:
1691         return err;
1692 }
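
The unix_attach_fds()/unix_detach_fds() calls above are the kernel side of SCM_RIGHTS descriptor passing; what user space sees is ordinary sendmsg()/recvmsg() ancillary data. A condensed sketch of passing one descriptor over a datagram socketpair (illustrative only, error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Send one descriptor as SCM_RIGHTS ancillary data. */
static int send_fd(int sock, int fd)
{
        char data = 'x';
        struct iovec iov = { .iov_base = &data, .iov_len = 1 };
        union {
                struct cmsghdr hdr;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type  = SCM_RIGHTS;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

/* Receive a descriptor sent with send_fd(). */
static int recv_fd(int sock)
{
        char data;
        struct iovec iov = { .iov_base = &data, .iov_len = 1 };
        union {
                struct cmsghdr hdr;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg;
        int fd = -1;

        if (recvmsg(sock, &msg, 0) < 0)
                return -1;
        cmsg = CMSG_FIRSTHDR(&msg);
        if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SCM_RIGHTS)
                memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
        return fd;
}

int main(void)
{
        int fds[2], passed;

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        send_fd(fds[0], STDOUT_FILENO);
        passed = recv_fd(fds[1]);       /* a fresh descriptor for stdout */
        if (passed >= 0)
                write(passed, "hello via SCM_RIGHTS\n", 21);

        return 0;
}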
1693
1694 /*
1695  *      Sleep until data has arrived. But check for races.
1696  */
1697
1698 static long unix_stream_data_wait(struct sock * sk, long timeo)
1699 {
1700         DEFINE_WAIT(wait);
1701
1702         unix_state_lock(sk);
1703
1704         for (;;) {
1705                 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1706
1707                 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1708                     sk->sk_err ||
1709                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
1710                     signal_pending(current) ||
1711                     !timeo)
1712                         break;
1713
1714                 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1715                 unix_state_unlock(sk);
1716                 timeo = schedule_timeout(timeo);
1717                 unix_state_lock(sk);
1718                 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1719         }
1720
1721         finish_wait(sk->sk_sleep, &wait);
1722         unix_state_unlock(sk);
1723         return timeo;
1724 }
1725
1726
1727
1728 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1729                                struct msghdr *msg, size_t size,
1730                                int flags)
1731 {
1732         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1733         struct scm_cookie tmp_scm;
1734         struct sock *sk = sock->sk;
1735         struct unix_sock *u = unix_sk(sk);
1736         struct sockaddr_un *sunaddr=msg->msg_name;
1737         int copied = 0;
1738         int check_creds = 0;
1739         int target;
1740         int err = 0;
1741         long timeo;
1742
1743         err = -EINVAL;
1744         if (sk->sk_state != TCP_ESTABLISHED)
1745                 goto out;
1746
1747         err = -EOPNOTSUPP;
1748         if (flags&MSG_OOB)
1749                 goto out;
1750
1751         target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1752         timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1753
1754         msg->msg_namelen = 0;
1755
1756         /* Lock the socket to prevent queue disordering
1757          * while we sleep in memcpy_toiovec
1758          */
1759
1760         if (!siocb->scm) {
1761                 siocb->scm = &tmp_scm;
1762                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1763         }
1764
1765         mutex_lock(&u->readlock);
1766
1767         do
1768         {
1769                 int chunk;
1770                 struct sk_buff *skb;
1771
1772                 unix_state_lock(sk);
1773                 skb = skb_dequeue(&sk->sk_receive_queue);
1774                 if (skb==NULL)
1775                 {
1776                         if (copied >= target)
1777                                 goto unlock;
1778
1779                         /*
1780                          *      POSIX 1003.1g mandates this order.
1781                          */
1782
1783                         if ((err = sock_error(sk)) != 0)
1784                                 goto unlock;
1785                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1786                                 goto unlock;
1787
1788                         unix_state_unlock(sk);
1789                         err = -EAGAIN;
1790                         if (!timeo)
1791                                 break;
1792                         mutex_unlock(&u->readlock);
1793
1794                         timeo = unix_stream_data_wait(sk, timeo);
1795
1796                         if (signal_pending(current)) {
1797                                 err = sock_intr_errno(timeo);
1798                                 goto out;
1799                         }
1800                         mutex_lock(&u->readlock);
1801                         continue;
1802  unlock:
1803                         unix_state_unlock(sk);
1804                         break;
1805                 }
1806                 unix_state_unlock(sk);
1807
1808                 if (check_creds) {
1809                         /* Never glue messages from different writers */
1810                         if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1811                                 skb_queue_head(&sk->sk_receive_queue, skb);
1812                                 break;
1813                         }
1814                 } else {
1815                         /* Copy credentials */
1816                         siocb->scm->creds = *UNIXCREDS(skb);
1817                         check_creds = 1;
1818                 }
1819
1820                 /* Copy address just once */
1821                 if (sunaddr)
1822                 {
1823                         unix_copy_addr(msg, skb->sk);
1824                         sunaddr = NULL;
1825                 }
1826
1827                 chunk = min_t(unsigned int, skb->len, size);
1828                 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1829                         skb_queue_head(&sk->sk_receive_queue, skb);
1830                         if (copied == 0)
1831                                 copied = -EFAULT;
1832                         break;
1833                 }
1834                 copied += chunk;
1835                 size -= chunk;
1836
1837                 /* Mark read part of skb as used */
1838                 if (!(flags & MSG_PEEK))
1839                 {
1840                         skb_pull(skb, chunk);
1841
1842                         if (UNIXCB(skb).fp)
1843                                 unix_detach_fds(siocb->scm, skb);
1844
1845                         /* put the skb back if we didn't use it up.. */
1846                         if (skb->len)
1847                         {
1848                                 skb_queue_head(&sk->sk_receive_queue, skb);
1849                                 break;
1850                         }
1851
1852                         kfree_skb(skb);
1853
1854                         if (siocb->scm->fp)
1855                                 break;
1856                 }
1857                 else
1858                 {
1859                         /* It is questionable, see note in unix_dgram_recvmsg.
1860                          */
1861                         if (UNIXCB(skb).fp)
1862                                 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1863
1864                         /* put message back and return */
1865                         skb_queue_head(&sk->sk_receive_queue, skb);
1866                         break;
1867                 }
1868         } while (size);
1869
1870         mutex_unlock(&u->readlock);
1871         scm_recv(sock, msg, siocb->scm, flags);
1872 out:
1873         return copied ? : err;
1874 }
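
The MSG_PEEK branches above (here and in unix_dgram_recvmsg) leave the data queued and only clone any attached descriptors, so a peek followed by an ordinary read returns the same bytes. A brief user-space sketch (illustrative):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        char peeked[16] = "", readbuf[16] = "";

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        send(fds[0], "payload", 7, 0);

        /* MSG_PEEK copies the data but leaves it on the queue ... */
        recv(fds[1], peeked, sizeof(peeked), MSG_PEEK);
        /* ... so the following ordinary read sees the same bytes.  */
        recv(fds[1], readbuf, sizeof(readbuf), 0);

        printf("peeked=%.7s read=%.7s\n", peeked, readbuf);
        close(fds[0]);
        close(fds[1]);
        return 0;
}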
1875
1876 static int unix_shutdown(struct socket *sock, int mode)
1877 {
1878         struct sock *sk = sock->sk;
1879         struct sock *other;
1880
1881         mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1882
1883         if (mode) {
1884                 unix_state_lock(sk);
1885                 sk->sk_shutdown |= mode;
1886                 other=unix_peer(sk);
1887                 if (other)
1888                         sock_hold(other);
1889                 unix_state_unlock(sk);
1890                 sk->sk_state_change(sk);
1891
1892                 if (other &&
1893                         (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1894
1895                         int peer_mode = 0;
1896
1897                         if (mode&RCV_SHUTDOWN)
1898                                 peer_mode |= SEND_SHUTDOWN;
1899                         if (mode&SEND_SHUTDOWN)
1900                                 peer_mode |= RCV_SHUTDOWN;
1901                         unix_state_lock(other);
1902                         other->sk_shutdown |= peer_mode;
1903                         unix_state_unlock(other);
1904                         other->sk_state_change(other);
1905                         read_lock(&other->sk_callback_lock);
1906                         if (peer_mode == SHUTDOWN_MASK)
1907                                 sk_wake_async(other,1,POLL_HUP);
1908                         else if (peer_mode & RCV_SHUTDOWN)
1909                                 sk_wake_async(other,1,POLL_IN);
1910                         read_unlock(&other->sk_callback_lock);
1911                 }
1912                 if (other)
1913                         sock_put(other);
1914         }
1915         return 0;
1916 }
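
unix_shutdown() mirrors the local mode onto the peer: SEND_SHUTDOWN here becomes RCV_SHUTDOWN there, so after shutdown(SHUT_WR) on one end the other end sees end of file once its queue drains. A short sketch (illustrative):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        char buf[8];
        ssize_t n;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        /* SEND_SHUTDOWN on fds[0] becomes RCV_SHUTDOWN on fds[1] ... */
        shutdown(fds[0], SHUT_WR);

        /* ... so a read on the peer returns 0 (end of file) at once. */
        n = read(fds[1], buf, sizeof(buf));
        printf("read returned %zd\n", n);       /* 0 */

        close(fds[0]);
        close(fds[1]);
        return 0;
}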
1917
1918 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1919 {
1920         struct sock *sk = sock->sk;
1921         long amount=0;
1922         int err;
1923
1924         switch(cmd)
1925         {
1926                 case SIOCOUTQ:
1927                         amount = atomic_read(&sk->sk_wmem_alloc);
1928                         err = put_user(amount, (int __user *)arg);
1929                         break;
1930                 case SIOCINQ:
1931                 {
1932                         struct sk_buff *skb;
1933
1934                         if (sk->sk_state == TCP_LISTEN) {
1935                                 err = -EINVAL;
1936                                 break;
1937                         }
1938
1939                         spin_lock(&sk->sk_receive_queue.lock);
1940                         if (sk->sk_type == SOCK_STREAM ||
1941                             sk->sk_type == SOCK_SEQPACKET) {
1942                                 skb_queue_walk(&sk->sk_receive_queue, skb)
1943                                         amount += skb->len;
1944                         } else {
1945                                 skb = skb_peek(&sk->sk_receive_queue);
1946                                 if (skb)
1947                                         amount=skb->len;
1948                         }
1949                         spin_unlock(&sk->sk_receive_queue.lock);
1950                         err = put_user(amount, (int __user *)arg);
1951                         break;
1952                 }
1953
1954                 default:
1955                         err = -ENOIOCTLCMD;
1956                         break;
1957         }
1958         return err;
1959 }
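
SIOCINQ above reports the bytes queued for reading (the whole queue for stream and seqpacket sockets, only the head datagram otherwise), while SIOCOUTQ reports the send-side skb memory still charged to the socket. A short sketch of querying SIOCINQ from user space (illustrative):

#include <linux/sockios.h>      /* SIOCINQ, SIOCOUTQ */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fds[2], queued = 0;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        send(fds[0], "0123456789", 10, 0);

        /* For a stream socket SIOCINQ sums every queued skb. */
        if (ioctl(fds[1], SIOCINQ, &queued) == 0)
                printf("%d bytes waiting to be read\n", queued); /* 10 */

        close(fds[0]);
        close(fds[1]);
        return 0;
}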
1960
1961 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1962 {
1963         struct sock *sk = sock->sk;
1964         unsigned int mask;
1965
1966         poll_wait(file, sk->sk_sleep, wait);
1967         mask = 0;
1968
1969         /* exceptional events? */
1970         if (sk->sk_err)
1971                 mask |= POLLERR;
1972         if (sk->sk_shutdown == SHUTDOWN_MASK)
1973                 mask |= POLLHUP;
1974         if (sk->sk_shutdown & RCV_SHUTDOWN)
1975                 mask |= POLLRDHUP;
1976
1977         /* readable? */
1978         if (!skb_queue_empty(&sk->sk_receive_queue) ||
1979             (sk->sk_shutdown & RCV_SHUTDOWN))
1980                 mask |= POLLIN | POLLRDNORM;
1981
1982         /* Connection-based sockets need to check for termination and startup */
1983         if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1984                 mask |= POLLHUP;
1985
1986         /*
1987          * We also set writable when the other side has shut down the
1988          * connection. This prevents stuck sockets.
1989          */
1990         if (unix_writable(sk))
1991                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1992
1993         return mask;
1994 }
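
The mask built above is what poll(2) and select(2) report. When the peer disappears, sk_shutdown on the survivor becomes SHUTDOWN_MASK, so the poller gets POLLIN (to collect the final end of file) together with POLLHUP. A small sketch (illustrative):

#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        struct pollfd pfd;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
                perror("socketpair");
                return 1;
        }

        close(fds[0]);  /* peer disappears */

        pfd.fd = fds[1];
        pfd.events = POLLIN;
        if (poll(&pfd, 1, 0) > 0)
                printf("revents: POLLIN=%d POLLHUP=%d\n",
                       !!(pfd.revents & POLLIN),
                       !!(pfd.revents & POLLHUP));

        close(fds[1]);
        return 0;
}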
1995
1996
1997 #ifdef CONFIG_PROC_FS
1998 static struct sock *unix_seq_idx(int *iter, loff_t pos)
1999 {
2000         loff_t off = 0;
2001         struct sock *s;
2002
2003         for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
2004                 if (off == pos)
2005                         return s;
2006                 ++off;
2007         }
2008         return NULL;
2009 }
2010
2011
2012 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2013 {
2014         spin_lock(&unix_table_lock);
2015         return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
2016 }
2017
2018 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2019 {
2020         ++*pos;
2021
2022         if (v == (void *)1)
2023                 return first_unix_socket(seq->private);
2024         return next_unix_socket(seq->private, v);
2025 }
2026
2027 static void unix_seq_stop(struct seq_file *seq, void *v)
2028 {
2029         spin_unlock(&unix_table_lock);
2030 }
2031
2032 static int unix_seq_show(struct seq_file *seq, void *v)
2033 {
2034
2035         if (v == (void *)1)
2036                 seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2037                          "Inode Path\n");
2038         else {
2039                 struct sock *s = v;
2040                 struct unix_sock *u = unix_sk(s);
2041                 unix_state_lock(s);
2042
2043                 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2044                         s,
2045                         atomic_read(&s->sk_refcnt),
2046                         0,
2047                         s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2048                         s->sk_type,
2049                         s->sk_socket ?
2050                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2051                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2052                         sock_i_ino(s));
2053
2054                 if (u->addr) {
2055                         int i, len;
2056                         seq_putc(seq, ' ');
2057
2058                         i = 0;
2059                         len = u->addr->len - sizeof(short);
2060                         if (!UNIX_ABSTRACT(s))
2061                                 len--;
2062                         else {
2063                                 seq_putc(seq, '@');
2064                                 i++;
2065                         }
2066                         for ( ; i < len; i++)
2067                                 seq_putc(seq, u->addr->name->sun_path[i]);
2068                 }
2069                 unix_state_unlock(s);
2070                 seq_putc(seq, '\n');
2071         }
2072
2073         return 0;
2074 }
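
Each line unix_seq_show() emits appears in /proc/net/unix with the columns printed above (kernel address, reference count, protocol, flags, type, state, inode, then the bound path, with abstract names shown behind '@'). A trivial reader, shown only to illustrate where the output lands:

#include <stdio.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/net/unix", "r");

        if (!f) {
                perror("/proc/net/unix");
                return 1;
        }

        /* Columns: Num RefCount Protocol Flags Type St Inode Path,
         * exactly as emitted by unix_seq_show() above.             */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}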
2075
2076 static const struct seq_operations unix_seq_ops = {
2077         .start  = unix_seq_start,
2078         .next   = unix_seq_next,
2079         .stop   = unix_seq_stop,
2080         .show   = unix_seq_show,
2081 };
2082
2083
2084 static int unix_seq_open(struct inode *inode, struct file *file)
2085 {
2086         struct seq_file *seq;
2087         int rc = -ENOMEM;
2088         int *iter = kmalloc(sizeof(int), GFP_KERNEL);
2089
2090         if (!iter)
2091                 goto out;
2092
2093         rc = seq_open(file, &unix_seq_ops);
2094         if (rc)
2095                 goto out_kfree;
2096
2097         seq          = file->private_data;
2098         seq->private = iter;
2099         *iter = 0;
2100 out:
2101         return rc;
2102 out_kfree:
2103         kfree(iter);
2104         goto out;
2105 }
2106
2107 static const struct file_operations unix_seq_fops = {
2108         .owner          = THIS_MODULE,
2109         .open           = unix_seq_open,
2110         .read           = seq_read,
2111         .llseek         = seq_lseek,
2112         .release        = seq_release_private,
2113 };
2114
2115 #endif
2116
2117 static struct net_proto_family unix_family_ops = {
2118         .family = PF_UNIX,
2119         .create = unix_create,
2120         .owner  = THIS_MODULE,
2121 };
2122
2123 static int __init af_unix_init(void)
2124 {
2125         int rc = -1;
2126         struct sk_buff *dummy_skb;
2127
2128         BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2129
2130         rc = proto_register(&unix_proto, 1);
2131         if (rc != 0) {
2132                 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2133                        __FUNCTION__);
2134                 goto out;
2135         }
2136
2137         sock_register(&unix_family_ops);
2138 #ifdef CONFIG_PROC_FS
2139         proc_net_fops_create(&init_net, "unix", 0, &unix_seq_fops);
2140 #endif
2141         unix_sysctl_register();
2142 out:
2143         return rc;
2144 }
2145
2146 static void __exit af_unix_exit(void)
2147 {
2148         sock_unregister(PF_UNIX);
2149         unix_sysctl_unregister();
2150         proc_net_remove(&init_net, "unix");
2151         proto_unregister(&unix_proto);
2152 }
2153
2154 module_init(af_unix_init);
2155 module_exit(af_unix_exit);
2156
2157 MODULE_LICENSE("GPL");
2158 MODULE_ALIAS_NETPROTO(PF_UNIX);