Merge with rsync://fileserver/linux
[powerpc.git] net/ipv4/tcp_ipv4.c
index 95528a7..62f62bb 100644
@@ -874,7 +874,7 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
                                              __u16 rport,
                                              __u32 raddr, __u32 laddr)
 {
-       struct tcp_listen_opt *lopt = tp->listen_opt;
+       struct listen_sock *lopt = tp->accept_queue.listen_opt;
        struct request_sock *req, **prev;
 
        for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
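For orientation, the per-listener SYN queue implied by the accesses in this diff looks roughly like the sketch below. Only listen_opt, syn_wait_lock, syn_table, hash_rnd, qlen and TCP_SYNQ_HSIZE actually appear in the hunks; the container type name, member order and everything else here are assumptions.

/* Sketch only: listener-side queue layout as implied by this diff.
 * Anything not referenced in the hunks is an assumption. */
struct listen_sock {
        int                     qlen;                      /* pending SYN_RECV requests */
        u32                     hash_rnd;                  /* salt for tcp_v4_synq_hash() */
        struct request_sock     *syn_table[TCP_SYNQ_HSIZE]; /* hash buckets of request socks */
};

struct request_sock_queue {                                /* embedded as tp->accept_queue */
        rwlock_t                syn_wait_lock;             /* protects listen_opt->syn_table */
        struct listen_sock      *listen_opt;               /* only non-NULL while listening */
        /* accept-queue head/tail omitted */
};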
@@ -898,18 +898,10 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
 static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_listen_opt *lopt = tp->listen_opt;
+       struct listen_sock *lopt = tp->accept_queue.listen_opt;
        u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
-       req->expires = jiffies + TCP_TIMEOUT_INIT;
-       req->retrans = 0;
-       req->sk = NULL;
-       req->dl_next = lopt->syn_table[h];
-
-       write_lock(&tp->syn_wait_lock);
-       lopt->syn_table[h] = req;
-       write_unlock(&tp->syn_wait_lock);
-
+       reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
        tcp_synq_added(sk);
 }
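The block removed above is essentially the body of the new helper. A minimal sketch of reqsk_queue_hash_req(), reconstructed from those removed lines (the real definition lives in the generic request_sock code, so treat the exact signature as an assumption):

/* Sketch only: what the helper consolidates, per the lines removed above. */
static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
                                        u32 hash, struct request_sock *req,
                                        unsigned long timeout)
{
        struct listen_sock *lopt = queue->listen_opt;

        req->expires = jiffies + timeout;
        req->retrans = 0;
        req->sk = NULL;
        req->dl_next = lopt->syn_table[hash];

        write_lock(&queue->syn_wait_lock);
        lopt->syn_table[hash] = req;
        write_unlock(&queue->syn_wait_lock);
}

The only behavioural difference visible from the call site is that the lock now lives in the queue rather than in struct tcp_sock.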
 
@@ -1374,21 +1366,6 @@ static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
        return dopt;
 }
 
-/*
- * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
- * One SYN_RECV socket costs about 80bytes on a 32bit machine.
- * It would be better to replace it with a global counter for all sockets
- * but then some measure against one socket starving all other sockets
- * would be needed.
- *
- * It was 128 by default. Experiments with real servers show, that
- * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
- * Further increasing requires to change hash table size.
- */
-int sysctl_max_syn_backlog = 256;
-
 struct request_sock_ops tcp_request_sock_ops = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
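tcp_request_sock_ops records the per-protocol object size so that protocol-independent request_sock code can allocate entries without knowing about struct tcp_request_sock. A hypothetical allocator sketch follows; the slab member, the rsk_ops field and the reqsk_alloc() name are all assumptions, only .family and .obj_size come from this diff:

/* Sketch only: how generic code could use the ops table and .obj_size. */
static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
{
        struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);

        if (req != NULL)
                req->rsk_ops = ops;     /* so protocol hooks can be found later */
        return req;
}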
@@ -2068,9 +2045,10 @@ static int tcp_v4_init_sock(struct sock *sk)
         */
        tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
        tp->snd_cwnd_clamp = ~0;
-       tp->mss_cache_std = tp->mss_cache = 536;
+       tp->mss_cache = 536;
 
        tp->reordering = sysctl_tcp_reordering;
+       tp->ca_ops = &tcp_init_congestion_ops;
 
        sk->sk_state = TCP_CLOSE;
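Instead of hard-wired Reno behaviour, a new socket now starts out pointing at a minimal congestion-control ops table. A rough sketch of what tcp_init_congestion_ops could look like; the struct name, member names and the Reno helpers are assumptions, only the tcp_init_congestion_ops symbol comes from this diff:

/* Sketch only: a placeholder ops table a freshly initialized socket could start on. */
struct tcp_congestion_ops tcp_init_congestion_ops = {
        .name           = "",                   /* replaced once a real algorithm is chosen */
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
};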
 
@@ -2093,6 +2071,8 @@ int tcp_v4_destroy_sock(struct sock *sk)
 
        tcp_clear_xmit_timers(sk);
 
+       tcp_cleanup_congestion_control(tp);
+
        /* Cleanup up the write buffer. */
        sk_stream_writequeue_purge(sk);
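The destroy path gains a matching teardown for whatever congestion-control state the socket acquired. A plausible shape for tcp_cleanup_congestion_control(), assuming a release hook and a module owner on the ops table (neither is shown in this diff):

/* Sketch only: undo congestion-control setup when the socket is destroyed. */
void tcp_cleanup_congestion_control(struct tcp_sock *tp)
{
        if (tp->ca_ops->release)
                tp->ca_ops->release(tp);        /* let the algorithm free private state */
        module_put(tp->ca_ops->owner);          /* drop the reference pinning its module */
}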
 
@@ -2167,17 +2147,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
                        if (++st->sbucket >= TCP_SYNQ_HSIZE)
                                break;
 get_req:
-                       req = tp->listen_opt->syn_table[st->sbucket];
+                       req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
        } else {
                tp = tcp_sk(sk);
-               read_lock_bh(&tp->syn_wait_lock);
-               if (tp->listen_opt && tp->listen_opt->qlen)
+               read_lock_bh(&tp->accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&tp->accept_queue))
                        goto start_req;
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
 get_sk:
@@ -2187,8 +2167,8 @@ get_sk:
                        goto out;
                }
                tp = tcp_sk(sk);
-               read_lock_bh(&tp->syn_wait_lock);
-               if (tp->listen_opt && tp->listen_opt->qlen) {
+               read_lock_bh(&tp->accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&tp->accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
@@ -2196,7 +2176,7 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
        }
        if (++st->bucket < TCP_LHTABLE_SIZE) {
                sk = sk_head(&tcp_listening_hash[st->bucket]);
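reqsk_queue_len() replaces the open-coded check on listen_opt and its qlen, so a faithful sketch is just that test behind an accessor (the exact return type is an assumption):

/* Sketch only: equivalent of the old "tp->listen_opt && tp->listen_opt->qlen" test. */
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
        return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}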
@@ -2383,7 +2363,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
-                       read_unlock_bh(&tp->syn_wait_lock);
+                       read_unlock_bh(&tp->accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
@@ -2670,7 +2650,6 @@ EXPORT_SYMBOL(tcp_proc_register);
 EXPORT_SYMBOL(tcp_proc_unregister);
 #endif
 EXPORT_SYMBOL(sysctl_local_port_range);
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
 EXPORT_SYMBOL(sysctl_tcp_tw_reuse);
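With both the definition and its export removed from this file, sysctl_max_syn_backlog presumably moves out with the rest of the generic request_sock code; a minimal sketch of the relocated definition, assuming the old default of 256 is kept:

/* Sketch only: assumed new home of the limit removed above,
 * keeping the default documented in the deleted comment. */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);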