blob: 80dbf2f4016e26824bc968115503ca2072933f63 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * net/sched/gen_estimator.c Simple rate estimator.
4 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08006 * Eric Dumazet <edumazet@google.com>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
8 * Changes:
9 * Jamal Hadi Salim - moved it to net/core and reshuffled
10 * names to make it usable in general net subsystem.
11 */
12
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080013#include <linux/uaccess.h>
Jiri Slaby1977f032007-10-18 23:40:25 -070014#include <linux/bitops.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/jiffies.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h>
28#include <linux/rtnetlink.h>
29#include <linux/init.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/slab.h>
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080031#include <linux/seqlock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <net/sock.h>
33#include <net/gen_stats.h>
34
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080035/* This code is NOT intended to be used for statistics collection,
36 * its purpose is to provide a base for statistical multiplexing
37 * for controlled load service.
38 * If you need only statistics, run a user level daemon which
39 * periodically reads byte counters.
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 */
41
/* Per-estimator state.  A timer refreshes the EWMA rates every
 * (250ms << intvl_log); lockless readers obtain a consistent
 * avbps/avpps pair through @seq.
 */
struct net_rate_estimator {
	struct gnet_stats_basic_packed	*bstats;	/* source counters (non-percpu) */
	spinlock_t		*stats_lock;	/* optional lock guarding *bstats */
	seqcount_t		*running;	/* qdisc running seqcount, passed to the stats copy */
	struct gnet_stats_basic_cpu __percpu *cpu_bstats; /* source counters (percpu variant) */
	u8			ewma_log;	/* EWMA decay: old average weighted by 1 - 2^-ewma_log */
	u8			intvl_log;	/* period : (250ms << intvl_log) */

	seqcount_t		seq;		/* protects avpps/avbps for lockless readers */
	u64			last_packets;	/* counter snapshot from the previous timer run */
	u64			last_bytes;

	u64			avpps;		/* average packets/sec, scaled by 2^8 (see gen_estimator_read) */
	u64			avbps;		/* average bytes/sec, scaled by 2^8 */

	unsigned long		next_jiffies;	/* next scheduled timer expiry */
	struct timer_list	timer;
	struct rcu_head		rcu;		/* deferred free after RCU grace period */
};
61
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080062static void est_fetch_counters(struct net_rate_estimator *e,
63 struct gnet_stats_basic_packed *b)
64{
Eric Dumazeta5f7add2018-02-22 19:45:27 -080065 memset(b, 0, sizeof(*b));
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080066 if (e->stats_lock)
67 spin_lock(e->stats_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070068
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080069 __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
Linus Torvalds1da177e2005-04-16 15:20:36 -070070
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080071 if (e->stats_lock)
72 spin_unlock(e->stats_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070073
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080074}
Jarek Poplawski4db0acf32008-11-24 15:48:05 -080075
Kees Cooke99e88a2017-10-16 14:43:17 -070076static void est_timer(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -070077{
Kees Cooke99e88a2017-10-16 14:43:17 -070078 struct net_rate_estimator *est = from_timer(est, t, timer);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080079 struct gnet_stats_basic_packed b;
80 u64 rate, brate;
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080082 est_fetch_counters(est, &b);
Eric Dumazetca558e12017-09-13 11:16:45 -070083 brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080084 brate -= (est->avbps >> est->ewma_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Eric Dumazet1c8dd9c2019-11-06 20:52:40 -080086 rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080087 rate -= (est->avpps >> est->ewma_log);
Ranko Zivojnovic0929c2d2007-07-16 18:28:32 -070088
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080089 write_seqcount_begin(&est->seq);
90 est->avbps += brate;
91 est->avpps += rate;
92 write_seqcount_end(&est->seq);
John Fastabend22e0f8b2014-09-28 11:52:56 -070093
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080094 est->last_bytes = b.bytes;
95 est->last_packets = b.packets;
Linus Torvalds1da177e2005-04-16 15:20:36 -070096
Eric Dumazet1c0d32f2016-12-04 09:48:16 -080097 est->next_jiffies += ((HZ/4) << est->intvl_log);
98
99 if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
100 /* Ouch... timer was delayed. */
101 est->next_jiffies = jiffies + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 }
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800103 mod_timer(&est->timer, est->next_jiffies);
Jarek Poplawski4db0acf32008-11-24 15:48:05 -0800104}
105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106/**
107 * gen_new_estimator - create a new rate estimator
108 * @bstats: basic statistics
Luis de Bethencourte9fc2f02016-03-19 21:31:38 +0000109 * @cpu_bstats: bstats per cpu
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110 * @rate_est: rate estimator statistics
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300111 * @lock: lock for statistics and control path
Eric Dumazetedb09eb2016-06-06 09:37:16 -0700112 * @running: qdisc running seqcount
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 * @opt: rate estimator configuration TLV
114 *
115 * Creates a new rate estimator with &bstats as source and &rate_est
116 * as destination. A new timer with the interval specified in the
117 * configuration TLV is created. Upon each interval, the latest statistics
118 * will be read from &bstats and the estimated rate will be stored in
Masanari Iidae793c0f2014-09-04 23:44:36 +0900119 * &rate_est with the statistics lock grabbed during this period.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900120 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 * Returns 0 on success or a negative error code.
Ranko Zivojnovic0929c2d2007-07-16 18:28:32 -0700122 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 */
Eric Dumazetc1a8f1f2009-08-16 09:36:49 +0000124int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
John Fastabend22e0f8b2014-09-28 11:52:56 -0700125 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800126 struct net_rate_estimator __rcu **rate_est,
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300127 spinlock_t *lock,
Eric Dumazetedb09eb2016-06-06 09:37:16 -0700128 seqcount_t *running,
Patrick McHardy1e904742008-01-22 22:11:17 -0800129 struct nlattr *opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130{
Patrick McHardy1e904742008-01-22 22:11:17 -0800131 struct gnet_estimator *parm = nla_data(opt);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800132 struct net_rate_estimator *old, *est;
133 struct gnet_stats_basic_packed b;
134 int intvl_log;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135
Patrick McHardy1e904742008-01-22 22:11:17 -0800136 if (nla_len(opt) < sizeof(*parm))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 return -EINVAL;
138
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800139 /* allowed timer periods are :
140 * -2 : 250ms, -1 : 500ms, 0 : 1 sec
141 * 1 : 2 sec, 2 : 4 sec, 3 : 8 sec
142 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 if (parm->interval < -2 || parm->interval > 3)
144 return -EINVAL;
145
Andrew Morton77d04bd2006-04-07 14:52:59 -0700146 est = kzalloc(sizeof(*est), GFP_KERNEL);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800147 if (!est)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148 return -ENOBUFS;
149
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800150 seqcount_init(&est->seq);
151 intvl_log = parm->interval + 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 est->bstats = bstats;
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300153 est->stats_lock = lock;
Eric Dumazetedb09eb2016-06-06 09:37:16 -0700154 est->running = running;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 est->ewma_log = parm->ewma_log;
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800156 est->intvl_log = intvl_log;
John Fastabend22e0f8b2014-09-28 11:52:56 -0700157 est->cpu_bstats = cpu_bstats;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300159 if (lock)
Eric Dumazet40ca54e2018-01-27 10:58:43 -0800160 local_bh_disable();
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800161 est_fetch_counters(est, &b);
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300162 if (lock)
Eric Dumazet40ca54e2018-01-27 10:58:43 -0800163 local_bh_enable();
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800164 est->last_bytes = b.bytes;
165 est->last_packets = b.packets;
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300166
167 if (lock)
168 spin_lock_bh(lock);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800169 old = rcu_dereference_protected(*rate_est, 1);
170 if (old) {
171 del_timer_sync(&old->timer);
172 est->avbps = old->avbps;
173 est->avpps = old->avpps;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174 }
Ranko Zivojnovic0929c2d2007-07-16 18:28:32 -0700175
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800176 est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
Kees Cooke99e88a2017-10-16 14:43:17 -0700177 timer_setup(&est->timer, est_timer, 0);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800178 mod_timer(&est->timer, est->next_jiffies);
Jarek Poplawski4db0acf32008-11-24 15:48:05 -0800179
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800180 rcu_assign_pointer(*rate_est, est);
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300181 if (lock)
182 spin_unlock_bh(lock);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800183 if (old)
184 kfree_rcu(old, rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185 return 0;
186}
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800187EXPORT_SYMBOL(gen_new_estimator);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188
189/**
190 * gen_kill_estimator - remove a rate estimator
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800191 * @rate_est: rate estimator
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 *
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800193 * Removes the rate estimator.
Ranko Zivojnovic0929c2d2007-07-16 18:28:32 -0700194 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 */
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800196void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197{
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800198 struct net_rate_estimator *est;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800200 est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
201 if (est) {
202 del_timer_sync(&est->timer);
203 kfree_rcu(est, rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 }
205}
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800206EXPORT_SYMBOL(gen_kill_estimator);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
208/**
Jarek Poplawski96750162008-01-21 02:36:02 -0800209 * gen_replace_estimator - replace rate estimator configuration
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210 * @bstats: basic statistics
Luis de Bethencourte9fc2f02016-03-19 21:31:38 +0000211 * @cpu_bstats: bstats per cpu
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 * @rate_est: rate estimator statistics
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300213 * @lock: lock for statistics and control path
Eric Dumazetedb09eb2016-06-06 09:37:16 -0700214 * @running: qdisc running seqcount (might be NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 * @opt: rate estimator configuration TLV
216 *
217 * Replaces the configuration of a rate estimator by calling
218 * gen_kill_estimator() and gen_new_estimator().
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900219 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 * Returns 0 on success or a negative error code.
221 */
Eric Dumazetc1a8f1f2009-08-16 09:36:49 +0000222int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
John Fastabend22e0f8b2014-09-28 11:52:56 -0700223 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800224 struct net_rate_estimator __rcu **rate_est,
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300225 spinlock_t *lock,
Eric Dumazetedb09eb2016-06-06 09:37:16 -0700226 seqcount_t *running, struct nlattr *opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227{
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800228 return gen_new_estimator(bstats, cpu_bstats, rate_est,
Vlad Buslov51a9f5a2018-08-10 20:51:54 +0300229 lock, running, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231EXPORT_SYMBOL(gen_replace_estimator);
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800232
233/**
234 * gen_estimator_active - test if estimator is currently in use
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800235 * @rate_est: rate estimator
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800236 *
Jarek Poplawski244e6c22008-11-26 15:24:32 -0800237 * Returns true if estimator is active, and false if not.
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800238 */
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800239bool gen_estimator_active(struct net_rate_estimator __rcu **rate_est)
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800240{
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800241 return !!rcu_access_pointer(*rate_est);
Stephen Hemmingerc1b56872008-11-25 21:14:06 -0800242}
243EXPORT_SYMBOL(gen_estimator_active);
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800244
245bool gen_estimator_read(struct net_rate_estimator __rcu **rate_est,
246 struct gnet_stats_rate_est64 *sample)
247{
248 struct net_rate_estimator *est;
249 unsigned seq;
250
251 rcu_read_lock();
252 est = rcu_dereference(*rate_est);
253 if (!est) {
254 rcu_read_unlock();
255 return false;
256 }
257
258 do {
259 seq = read_seqcount_begin(&est->seq);
260 sample->bps = est->avbps >> 8;
261 sample->pps = est->avpps >> 8;
262 } while (read_seqcount_retry(&est->seq, seq));
263
264 rcu_read_unlock();
265 return true;
266}
267EXPORT_SYMBOL(gen_estimator_read);