/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *		Jamal Hadi Salim - moved it to net/core and reshuffled
 *		names to make it usable in general net subsystem.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection;
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   E.g. I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows, has minimal overhead when that number is small, but is enough
   to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps and avpps are scaled by 2^5.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links: max speed being 34360 Mbit/sec
   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
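
/*
 * A worked example (numbers chosen purely for illustration, they are not
 * mandated by the code below): with interval = 2 and ewma_log = 3 the
 * averaging period is A = 2^2 = 4 sec and W = 2^-3 = 1/8, giving a time
 * constant of roughly T = 4 / -ln(1 - 1/8) ~= 30 sec.  Internally the
 * timer period is (HZ/4) << idx jiffies with idx = interval + 2,
 * i.e. 2^(idx - 2) seconds.
 */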

#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est64	*rate_est;
	spinlock_t		*stats_lock;
	seqcount_t		*running;
	int			ewma_log;
	u32			last_packets;
	unsigned long		avpps;
	u64			last_bytes;
	u64			avbps;
	struct rcu_head		e_rcu;
	struct rb_node		node;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct rcu_head		head;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

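/*
 * Estimators are grouped by interval: elist[idx] holds the estimators whose
 * timer fires every (HZ/4) << idx jiffies, with idx = interval + 2.
 */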
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);

static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		struct gnet_stats_basic_packed b = {0};
		unsigned long rate;
		u64 brate;

		if (e->stats_lock)
			spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		__gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);

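		/*
		 * The period of this list is 2^(idx - 2) seconds, so shifting
		 * the byte/packet deltas by (7 - idx) yields per-second rates
		 * scaled by 2^5 (see NOTES above).  The EWMA below uses
		 * W = 2^(-ewma_log), and the final "+ 0xF) >> 5" converts the
		 * scaled averages back to plain bps/pps for reporting.
		 */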
		brate = (b.bytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = b.bytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);

		rate = b.packets - e->last_packets;
		rate <<= (7 - idx);
		e->last_packets = b.packets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
skip:
		read_unlock(&est_lock);
		if (e->stats_lock)
			spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}

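/*
 * Estimators are also kept in an rbtree ordered by their bstats pointer
 * (with rate_est checked on lookup), so gen_kill_estimator() and
 * gen_estimator_active() can locate an entry via gen_find_node() without
 * scanning every interval list.
 */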
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est64 *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @running: qdisc running seqcount
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct gnet_stats_rate_est64 *rate_est,
		      spinlock_t *stats_lock,
		      seqcount_t *running,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	struct gnet_stats_basic_packed b = {0};
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	__gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);

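	/* interval -2..3 maps to idx 0..5: timer periods of 250 msec to 8 sec */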
	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->running = running;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = b.bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = b.packets;
	est->avpps = rate_est->pps<<5;
	est->cpu_bstats = cpu_bstats;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
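
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * qdisc-style user would typically wire up its own counters and the
 * netlink-supplied rate TLV roughly like this:
 *
 *	err = gen_new_estimator(&sch->bstats, sch->cpu_bstats, &sch->rate_est,
 *				NULL, running, tca[TCA_RATE]);
 *	if (err)
 *		goto err_out;
 *	...
 *	gen_kill_estimator(&sch->bstats, &sch->rate_est);
 *
 * The exact fields, the stats_lock/running arguments and the error handling
 * depend entirely on the caller.
 */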

/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * Note: Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est64 *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

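		/*
		 * Clearing bstats under est_lock lets a concurrently running
		 * est_timer() notice the entry is dying and skip it before it
		 * is freed after an RCU grace period.
		 */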
		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @running: qdisc running seqcount (might be NULL)
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_rate_est64 *rate_est,
			  spinlock_t *stats_lock,
			  seqcount_t *running, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est64 *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);