Merge branch 'master'
[powerpc.git] / net / core / sock.c
index a96ea7d..51fcfbc 100644
@@ -92,7 +92,6 @@
  */
 
 #include <linux/capability.h>
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/socket.h>
 #include <net/tcp.h>
 #endif
 
+/*
+ * Each address family might have different locking rules, so we have
+ * one slock key per address family:
+ */
+static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_slock_keys[AF_MAX];
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * Make lock validator output more readable. (We pre-construct these
+ * strings at build time, so that runtime initialization of socket
+ * locks is fast):
+ */
+static const char *af_family_key_strings[AF_MAX+1] = {
+  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
+  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
+  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
+  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
+  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
+  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
+  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
+  "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
+  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
+  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-29"          ,
+  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
+};
+static const char *af_family_slock_key_strings[AF_MAX+1] = {
+  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
+  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
+  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
+  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
+  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
+  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
+  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
+  "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
+  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
+  "slock-27"       , "slock-28"          , "slock-29"          ,
+  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_MAX"
+};
+#endif
+
+/*
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+static struct lock_class_key af_callback_keys[AF_MAX];
+
 /* Take into consideration the size of the struct sk_buff overhead in the
  * determination of these values, since that is non-constant across
  * platforms.  This makes socket queueing behavior and performance
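
The per-family key arrays above matter because lockdep identifies a
lock's class by the static key the lock was initialized with: without
one key per address family, every socket lock in the system would fall
into a single class, and legitimate cross-family nesting (say, an
AF_UNIX socket locked from inside an AF_INET callback) would be flagged
as recursive locking. A minimal userspace sketch of the idea, with a
stubbed lockdep_set_class_and_name() and abbreviated tables
(illustration only, not kernel code):

#include <stdio.h>

struct lock_class_key { int dummy; };

/* Abbreviated stand-ins for the kernel arrays above: */
static struct lock_class_key af_family_slock_keys[3];
static const char *af_family_slock_key_strings[3] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX", "slock-AF_INET",
};

/* Stub: the real lockdep hashes the key to find the lock's class. */
static void lockdep_set_class_and_name(void *lock,
				       struct lock_class_key *key,
				       const char *name)
{
	printf("lock %p -> class %p (%s)\n", lock, (void *)key, name);
}

int main(void)
{
	int slock_a, slock_b;	/* stand-ins for two AF_INET slocks */

	/* Both sockets share one key, hence one class and one rule set: */
	lockdep_set_class_and_name(&slock_a, &af_family_slock_keys[2],
				   af_family_slock_key_strings[2]);
	lockdep_set_class_and_name(&slock_b, &af_family_slock_keys[2],
				   af_family_slock_key_strings[2]);
	return 0;
}
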
@@ -238,9 +284,16 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
        skb->dev = NULL;
 
        bh_lock_sock(sk);
-       if (!sock_owned_by_user(sk))
+       if (!sock_owned_by_user(sk)) {
+               /*
+                * trylock + unlock semantics:
+                */
+               mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
+
                rc = sk->sk_backlog_rcv(sk, skb);
-       else
+
+               mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+       } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);
 out:
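
The mutex_acquire()/mutex_release() pair above tells the lock validator
that sk_lock is logically held across sk_backlog_rcv(), even though
only the sk_lock.slock spinlock is actually taken in this path; the
third argument of 1 marks it as a trylock, so lockdep does not treat it
as a blocking acquisition in BH context. A stubbed userspace sketch of
the same pattern (the printf macros and backlog_rcv() are made-up
stand-ins for the real lockdep hooks and sk->sk_backlog_rcv):

#include <stdio.h>

/* Stub lockdep hooks; the real macros record per-task lock state: */
#define mutex_acquire(map, subclass, trylock, ip) \
	printf("lockdep: acquire %s (trylock=%d)\n", (map), (trylock))
#define mutex_release(map, nested, ip) \
	printf("lockdep: release %s\n", (map))

static int backlog_rcv(void)	/* stands in for sk->sk_backlog_rcv */
{
	return 0;
}

int main(void)
{
	/* Socket not owned by a process: handle the packet here, and
	 * tell the validator sk_lock is logically held meanwhile. */
	mutex_acquire("sk_lock-AF_INET", 0, 1, 0);
	backlog_rcv();
	mutex_release("sk_lock-AF_INET", 1, 0);
	return 0;
}
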
@@ -385,7 +438,21 @@ set_sndbuf:
                                val = sysctl_rmem_max;
 set_rcvbuf:
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-                       /* FIXME: is this lower bound the right one? */
+                       /*
+                        * We double it on the way in to account for
+                        * "struct sk_buff" etc. overhead.   Applications
+                        * assume that the SO_RCVBUF setting they make will
+                        * allow that much actual data to be received on that
+                        * socket.
+                        *
+                        * Applications are unaware that "struct sk_buff" and
+                        * other overheads allocate from the receive buffer
+                        * during socket buffer allocation.
+                        *
+                        * After considering the possible alternatives,
+                        * returning the value we actually used in getsockopt
+                        * is the most desirable behavior.
+                        */
                        if ((val * 2) < SOCK_MIN_RCVBUF)
                                sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
                        else
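
The doubling is visible from userspace: getsockopt(SO_RCVBUF) reports
the value the kernel actually uses, i.e. twice the requested size (or
SOCK_MIN_RCVBUF when the doubled value would fall below it). A small
demonstration, runnable on Linux:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int val = 4096;
	socklen_t len = sizeof(val);

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
	printf("SO_RCVBUF: %d\n", val);	/* prints 8192, i.e. 4096 * 2 */
	close(fd);
	return 0;
}
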
@@ -551,6 +618,13 @@ set_rcvbuf:
                        ret = -ENONET;
                        break;
 
+               case SO_PASSSEC:
+                       if (valbool)
+                               set_bit(SOCK_PASSSEC, &sock->flags);
+                       else
+                               clear_bit(SOCK_PASSSEC, &sock->flags);
+                       break;
+
                /* We implement the SO_SNDLOWAT etc to
                   not be settable (1003.1g 5.3) */
                default:
@@ -709,6 +783,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                        v.val = sk->sk_state == TCP_LISTEN;
                        break;
 
+               case SO_PASSSEC:
+                       v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+                       break;
+
                case SO_PEERSEC:
                        return security_socket_getpeersec_stream(sock, optval, optlen, len);
 
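
Together with the setsockopt() hunk above, this makes SO_PASSSEC an
ordinary boolean socket option; it requests delivery of SCM_SECURITY
ancillary data (the sender's security label) on AF_UNIX datagram
sockets. A minimal userspace round-trip; the fallback #define uses the
asm-generic value and is only needed where older headers lack it:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_PASSSEC
#define SO_PASSSEC 34	/* asm-generic value; older headers lack it */
#endif

int main(void)
{
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	int on = 1, val = 0;
	socklen_t len = sizeof(val);

	setsockopt(fd, SOL_SOCKET, SO_PASSSEC, &on, sizeof(on));
	getsockopt(fd, SOL_SOCKET, SO_PASSSEC, &val, &len);
	printf("SO_PASSSEC: %d\n", val);	/* 1 if the kernel accepted it */
	close(fd);
	return 0;
}
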
@@ -725,6 +803,33 @@ lenout:
        return 0;
 }
 
+/*
+ * Initialize an sk_lock.
+ *
+ * (We also register the sk_lock with the lock validator.)
+ */
+static inline void sock_lock_init(struct sock *sk)
+{
+       spin_lock_init(&sk->sk_lock.slock);
+       sk->sk_lock.owner = NULL;
+       init_waitqueue_head(&sk->sk_lock.wq);
+       /*
+        * Make sure we are not reinitializing a held lock:
+        */
+       debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
+
+       /*
+        * Mark both the sk_lock and the sk_lock.slock as
+        * per-address-family lock classes:
+        */
+       lockdep_set_class_and_name(&sk->sk_lock.slock,
+                                  af_family_slock_keys + sk->sk_family,
+                                  af_family_slock_key_strings[sk->sk_family]);
+       lockdep_init_map(&sk->sk_lock.dep_map,
+                        af_family_key_strings[sk->sk_family],
+                        af_family_keys + sk->sk_family);
+}
+
 /**
  *     sk_alloc - All socket objects are allocated here
  *     @family: protocol family
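
Note that sock_lock_init() registers two lockdep identities per socket:
one class for the underlying slock spinlock and a separate one for the
mutex-like sk_lock itself, both selected by sk->sk_family. A userspace
sketch of how the two names resolve for an AF_INET socket (abbreviated
copies of the kernel tables, illustration only):

#include <stdio.h>
#include <sys/socket.h>

static const char *af_family_key_strings[] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX", "sk_lock-AF_INET",
};
static const char *af_family_slock_key_strings[] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX", "slock-AF_INET",
};

int main(void)
{
	int family = AF_INET;	/* == 2 in <sys/socket.h> */

	printf("dep_map class: %s\n", af_family_key_strings[family]);
	printf("slock class:   %s\n", af_family_slock_key_strings[family]);
	return 0;
}
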
@@ -818,9 +923,14 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                atomic_set(&newsk->sk_omem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);
+#ifdef CONFIG_NET_DMA
+               skb_queue_head_init(&newsk->sk_async_wait_queue);
+#endif
 
                rwlock_init(&newsk->sk_dst_lock);
                rwlock_init(&newsk->sk_callback_lock);
+               lockdep_set_class(&newsk->sk_callback_lock,
+                                  af_callback_keys + newsk->sk_family);
 
                newsk->sk_dst_cache     = NULL;
                newsk->sk_wmem_queued   = 0;
@@ -1369,6 +1479,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);
+#ifdef CONFIG_NET_DMA
+       skb_queue_head_init(&sk->sk_async_wait_queue);
+#endif
 
        sk->sk_send_head        =       NULL;
 
@@ -1392,6 +1505,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        rwlock_init(&sk->sk_dst_lock);
        rwlock_init(&sk->sk_callback_lock);
+       lockdep_set_class(&sk->sk_callback_lock,
+                          af_callback_keys + sk->sk_family);
 
        sk->sk_state_change     =       sock_def_wakeup;
        sk->sk_data_ready       =       sock_def_readable;
@@ -1419,24 +1534,34 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 void fastcall lock_sock(struct sock *sk)
 {
        might_sleep();
-       spin_lock_bh(&(sk->sk_lock.slock));
+       spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owner)
                __lock_sock(sk);
        sk->sk_lock.owner = (void *)1;
-       spin_unlock_bh(&(sk->sk_lock.slock));
+       spin_unlock(&sk->sk_lock.slock);
+       /*
+        * The sk_lock has mutex_lock() semantics here:
+        */
+       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+       local_bh_enable();
 }
 
 EXPORT_SYMBOL(lock_sock);
 
 void fastcall release_sock(struct sock *sk)
 {
-       spin_lock_bh(&(sk->sk_lock.slock));
+       /*
+        * The sk_lock has mutex_unlock() semantics:
+        */
+       mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+
+       spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);
        sk->sk_lock.owner = NULL;
-        if (waitqueue_active(&(sk->sk_lock.wq)))
-               wake_up(&(sk->sk_lock.wq));
-       spin_unlock_bh(&(sk->sk_lock.slock));
+       if (waitqueue_active(&sk->sk_lock.wq))
+               wake_up(&sk->sk_lock.wq);
+       spin_unlock_bh(&sk->sk_lock.slock);
 }
 EXPORT_SYMBOL(release_sock);
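
lock_sock()/release_sock() build a sleeping, mutex-like lock out of a
spinlock, an owner marker and a wait queue, which is why the patch can
annotate them with mutex_acquire()/mutex_release(). A rough userspace
analogue built on pthreads (no BH disabling, no lockdep; struct
sock_lock and the my_*() helpers are made-up stand-ins, and the
condition variable plays the role of sk_lock.wq):

#include <pthread.h>
#include <stdbool.h>

struct sock_lock {
	pthread_mutex_t slock;	/* plays the role of sk_lock.slock */
	bool owner;		/* plays the role of sk_lock.owner */
	pthread_cond_t wq;	/* plays the role of sk_lock.wq */
};

static void my_lock_sock(struct sock_lock *sk)
{
	pthread_mutex_lock(&sk->slock);
	while (sk->owner)	/* __lock_sock(): sleep until released */
		pthread_cond_wait(&sk->wq, &sk->slock);
	sk->owner = true;	/* sk->sk_lock.owner = (void *)1 */
	pthread_mutex_unlock(&sk->slock);
}

static void my_release_sock(struct sock_lock *sk)
{
	pthread_mutex_lock(&sk->slock);
	sk->owner = false;
	pthread_cond_broadcast(&sk->wq);	/* wake_up(&sk_lock.wq) */
	pthread_mutex_unlock(&sk->slock);
}

int main(void)
{
	struct sock_lock sk = {
		PTHREAD_MUTEX_INITIALIZER, false, PTHREAD_COND_INITIALIZER
	};

	my_lock_sock(&sk);
	/* ... socket owned; softirqs would queue packets to the backlog ... */
	my_release_sock(&sk);
	return 0;
}
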