2 * net/sched/estimator.c Simple rate estimator.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 #include <asm/uaccess.h>
13 #include <asm/system.h>
14 #include <asm/bitops.h>
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/jiffies.h>
19 #include <linux/string.h>
21 #include <linux/socket.h>
22 #include <linux/sockios.h>
24 #include <linux/errno.h>
25 #include <linux/interrupt.h>
26 #include <linux/netdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/init.h>
31 #include <net/pkt_sched.h>
34 This code is NOT intended to be used for statistics collection,
35 its purpose is to provide a base for statistical multiplexing
36 for controlled load service.
37 If you need only statistics, run a user level daemon which
38 periodically reads byte counters.
40 Unfortunately, rate estimation is not a very easy task.
41 F.e. I did not find a simple way to estimate the current peak rate
42 and even failed to formulate the problem 8)8)
44 So I preferred not to build an estimator into the scheduler,
45 but run this task separately.
46 Ideally, it should be kernel thread(s), but for now it runs
47 from timers, which puts apparent top bounds on the number of rated
48 flows, has minimal overhead on small numbers of flows, but is enough
49 to handle controlled load service, sets of aggregates.
51 We measure rate over A=(1<<interval) seconds and evaluate EWMA:
53 avrate = avrate*(1-W) + rate*W
55 where W is chosen as negative power of 2: W = 2^(-ewma_log)
57 The resulting time constant is:
64 * The stored value for avbps is scaled by 2^5, so that maximal
65 rate is ~1Gbit, avpps is scaled by 2^10.
67 * Minimal interval is HZ/4=250msec (it is the greatest common divisor
68 for HZ=100 and HZ=1024 8)), maximal interval
69 is (HZ/4)*2^EST_MAX_INTERVAL = 8sec. Shorter intervals
70 are too expensive, longer ones can be implemented
71 at user level painlessly.
78 #define EST_MAX_INTERVAL 5	/* interval buckets 0..5: periods (HZ/4)<<idx, max 8 sec */
/*
 * Per-estimator state, kept on a singly linked list per interval bucket.
 *
 * NOTE(review): this extract is elided — the struct's remaining fields
 * (interval, ewma_log, last_bytes, avbps, last_packets, avpps, judging by
 * the code below) and the closing brace are not visible here; confirm
 * against the full file.
 */
80 struct qdisc_estimator
82 	struct qdisc_estimator *next;	/* next estimator in the same interval bucket */
83 	struct tc_stats *stats;	/* caller-owned counters we sample and publish into */
84 	spinlock_t *stats_lock;	/* lock protecting *stats, taken in est_timer() */
/*
 * One timer plus list head per measurement interval (one elist[] slot).
 *
 * NOTE(review): the closing brace of this struct is not visible in this
 * elided extract.
 */
93 struct qdisc_estimator_head
95 	struct timer_list timer;	/* re-armed every (HZ/4)<<idx jiffies by est_timer() */
96 	struct qdisc_estimator *list;	/* all estimators using this interval */
/* One bucket per interval; index = user interval (-2..3) shifted to 0..5. */
99 static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1];
101 /* Estimator array lock */
102 static rwlock_t est_lock = RW_LOCK_UNLOCKED;	/* guards elist[].list; est_timer() is a reader */
/*
 * est_timer() - periodic callback for one interval bucket: fold each
 * estimator's byte/packet deltas into its EWMA and publish bps/pps into
 * the caller's tc_stats, then re-arm the timer.
 *
 * @arg: index of the elist[] bucket this timer serves (see timer.data setup
 *       in qdisc_new_estimator()).
 *
 * NOTE(review): this extract is elided — the declarations of idx, nbytes,
 * npackets and rate, the read of st->bytes, and some closing braces are not
 * visible here.
 */
104 static void est_timer(unsigned long arg)
107 	struct qdisc_estimator *e;
109 	read_lock(&est_lock);
110 	for (e = elist[idx].list; e; e = e->next) {
111 		struct tc_stats *st = e->stats;
116 		spin_lock(e->stats_lock);	/* caller's lock protecting *st */
118 		npackets = st->packets;
		/* <<(7 - idx) turns the per-period byte delta into bytes/sec
		 * scaled by 2^5 (period is 2^idx/4 sec), matching the header
		 * comment's avbps scaling */
119 		rate = (nbytes - e->last_bytes)<<(7 - idx);
120 		e->last_bytes = nbytes;
121 		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;	/* EWMA with W = 2^-ewma_log */
122 		st->bps = (e->avbps+0xF)>>5;	/* round up, drop the 2^5 scaling */
		/* same for packets: avpps is scaled by 2^10, hence <<(12 - idx) */
124 		rate = (npackets - e->last_packets)<<(12 - idx);
125 		e->last_packets = npackets;
126 		e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
		/* NOTE(review): e->stats is the cached `st`; plain st->pps would match st->bps above */
127 		e->stats->pps = (e->avpps+0x1FF)>>10;
128 		spin_unlock(e->stats_lock);
131 	mod_timer(&elist[idx].timer, jiffies + ((HZ/4)<<idx));	/* re-arm: period (HZ/4)<<idx jiffies */
132 	read_unlock(&est_lock);
/*
 * qdisc_new_estimator() - create a rate estimator and attach it to @stats.
 *
 * @stats:      counters to sample; bps/pps results are published back into it
 * @stats_lock: lock protecting @stats, taken by est_timer()
 * @opt:        rtnetlink attribute carrying a struct tc_estimator
 *
 * Returns 0 on success, negative errno on failure — presumably; the
 * error-return statements are elided from this extract.
 *
 * NOTE(review): this extract is elided — the returns after the payload,
 * range and kmalloc checks, the final return and the closing brace are not
 * visible here.
 */
135 int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct rtattr *opt)
137 	struct qdisc_estimator *est;
138 	struct tc_estimator *parm = RTA_DATA(opt);
140 	if (RTA_PAYLOAD(opt) < sizeof(*parm))
	/* user-visible interval range is -2..3; +2 below maps it to elist index 0..5 */
143 	if (parm->interval < -2 || parm->interval > 3)
146 	est = kmalloc(sizeof(*est), GFP_KERNEL);
150 	memset(est, 0, sizeof(*est));
151 	est->interval = parm->interval + 2;
153 	est->stats_lock = stats_lock;
154 	est->ewma_log = parm->ewma_log;
	/* seed the EWMAs from the current counters so the estimate starts warm */
155 	est->last_bytes = stats->bytes;
156 	est->avbps = stats->bps<<5;	/* avbps is stored scaled by 2^5 */
157 	est->last_packets = stats->packets;
158 	est->avpps = stats->pps<<10;	/* avpps is stored scaled by 2^10 */
160 	est->next = elist[est->interval].list;
161 	if (est->next == NULL) {
		/* first estimator in this bucket: start its periodic timer */
162 		init_timer(&elist[est->interval].timer);
163 		elist[est->interval].timer.data = est->interval;	/* bucket index passed as est_timer() arg */
164 		elist[est->interval].timer.expires = jiffies + ((HZ/4)<<est->interval);
165 		elist[est->interval].timer.function = est_timer;
166 		add_timer(&elist[est->interval].timer);
168 	write_lock_bh(&est_lock);	/* publish the new head to est_timer() readers */
169 	elist[est->interval].list = est;
170 	write_unlock_bh(&est_lock);
/*
 * qdisc_kill_estimator() - detach and free every estimator bound to @stats,
 * scanning all interval buckets and stopping a bucket's timer once its list
 * becomes empty.
 *
 * NOTE(review): this extract is elided — the declaration of idx, the
 * unlink/kfree of matching entries, the `killed` counter updates and
 * several braces are not visible here.
 */
174 void qdisc_kill_estimator(struct tc_stats *stats)
177 	struct qdisc_estimator *est, **pest;
179 	for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
181 		pest = &elist[idx].list;
182 		while ((est=*pest) != NULL) {
183 			if (est->stats != stats) {
188 			write_lock_bh(&est_lock);	/* exclude est_timer() while unlinking */
190 			write_unlock_bh(&est_lock);
195 		if (killed && elist[idx].list == NULL)
196 			del_timer(&elist[idx].timer);	/* bucket drained: stop its timer */
/* Exported so other kernel modules can create and destroy estimators. */
200 EXPORT_SYMBOL(qdisc_kill_estimator);
201 EXPORT_SYMBOL(qdisc_new_estimator);