// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

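/*
 * Per-queue stats state: the list of registered callbacks, a lock
 * protecting the list and the accounting count, and the number of
 * outstanding blk_stat_enable_accounting() users.
 */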
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

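/* Reset a stat bucket; min starts at -1ULL so the first sample always wins. */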
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * Sum @src (a per-cpu stat whose mean isn't initialized) into @dst.
 * Bail out if @src is empty or the combined sample count would wrap,
 * since the mean calculation would be meaningless either way.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
			    dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

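/*
 * Record one sample: min/max are tracked directly, while values are
 * accumulated in ->batch so the mean is only computed (with a single
 * division) when buckets are summed in blk_rq_stat_sum().
 */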
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

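/*
 * Called at request completion time: compute the device I/O time
 * (clamped to zero if the clock appears to have gone backwards) and add
 * it to the matching per-cpu bucket of every active callback on the
 * queue.
 */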
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

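/*
 * Fires once per stats window: reset the aggregate buckets, fold in and
 * clear every online CPU's buckets, then hand the aggregated window to
 * the callback's owner via cb->timer_fn().
 */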
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

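/*
 * Allocate a callback with @buckets aggregate and per-cpu stat buckets.
 * @bucket_fn maps a request to a bucket index (negative means "ignore
 * this request"), and @timer_fn receives the aggregated stats. Returns
 * NULL on allocation failure.
 */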
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

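/*
 * Zero the per-cpu buckets on all possible CPUs, then publish the
 * callback on the queue's RCU-protected list and enable stats gathering
 * on the queue.
 */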
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

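/*
 * Unpublish the callback, disable stats gathering if nothing else needs
 * it, and wait for a concurrently running timer to finish.
 */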
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

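/* RCU callback that performs the actual freeing once readers are done. */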
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

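/*
 * Free a callback after an RCU grace period, so a concurrent
 * blk_stat_add() walking the callback list can no longer touch it.
 * NULL is a no-op.
 */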
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

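/*
 * Drop one accounting reference; when the count hits zero and no
 * callbacks are registered, stats gathering is switched off.
 */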
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

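/*
 * Take one accounting reference; the 0 -> 1 transition switches stats
 * gathering on (with callbacks registered the flag is already set).
 */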
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

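/* Allocate and initialize per-queue stats state; NULL on allocation failure. */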
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

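/* Free per-queue stats state; warns if callbacks are still registered. */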
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}