// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;		/* nr of blk_stat_enable_accounting() users */
};

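/* Reset a bucket; min starts at -1ULL (U64_MAX) so any first sample lowers it. */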
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	/* Nothing to fold in if src is empty or nr_samples would overflow. */
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

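/* Fold one sample into a bucket; the mean is only computed later, at sum time. */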
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

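/*
 * Called on request completion: compute the request's I/O time and feed it
 * into the per-cpu bucket of every active callback on the queue. The callback
 * list is walked under RCU, so removal must wait for a grace period.
 */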
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	/* Guard against a completion time earlier than the issue time. */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

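/*
 * Runs when a callback's timer fires: collapse the per-cpu buckets into
 * cb->stat[], reset the per-cpu copies for the next window, then hand the
 * aggregated result to the consumer's timer_fn.
 */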
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

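/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function.
 * @buckets: Number of statistics buckets.
 * @data: Value for the @data field of the &struct blk_stat_callback.
 *
 * See &struct blk_stat_callback for details on the callback functions.
 *
 * Return: &struct blk_stat_callback on success, NULL on failure.
 */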
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

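/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Note that a single &struct blk_stat_callback can only be added to a single
 * &struct request_queue.
 */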
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

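/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the callback is not running on any CPUs and will not be
 * called again unless readded.
 */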
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

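/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback.
 *
 * @cb may be NULL, in which case this does nothing. If it is not NULL, @cb
 * must not be associated with a request queue, i.e. blk_stat_remove_callback()
 * was called if the callback had been added. Freeing is deferred by an RCU
 * grace period so that concurrent blk_stat_add() walkers cannot touch it.
 */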
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

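/*
 * blk_stat_{enable,disable}_accounting() reference-count users that need
 * queue-wide stats even with no callback registered. QUEUE_FLAG_STATS is
 * cleared only once the last user is gone and no callbacks remain.
 */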
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}
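
/*
 * Example consumer wiring (a minimal sketch only; the example_* names are
 * hypothetical, not part of this file). A consumer allocates a callback,
 * registers it on a queue, and arms a sampling window with
 * blk_stat_activate_msecs() from blk-stat.h; timer_fn then sees the
 * aggregated buckets:
 *
 *	static int example_bucket_fn(const struct request *rq)
 *	{
 *		return op_is_write(req_op(rq));		// 0 = read, 1 = write
 *	}
 *
 *	static void example_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		if (cb->stat[0].nr_samples)
 *			pr_info("read mean %llu ns\n", cb->stat[0].mean);
 *		blk_stat_activate_msecs(cb, 100);	// re-arm the window
 *	}
 *
 *	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
 *				     2, NULL);
 *	if (cb) {
 *		blk_stat_add_callback(q, cb);
 *		blk_stat_activate_msecs(cb, 100);
 *	}
 *
 * Teardown is blk_stat_remove_callback(q, cb) followed by
 * blk_stat_free_callback(cb).
 */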