block/blk-stat.c, v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

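/*
 * Per-queue stat state: @callbacks is the RCU-managed list of active
 * blk_stat_callbacks, @lock protects the list and @accounting, a count
 * of users that need request stats recorded without a callback.
 */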
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

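/*
 * A per-cpu source carries a raw sum of samples in ->batch, so the merged
 * mean is (dst->mean * dst->nr_samples + src->batch) /
 * (dst->nr_samples + src->nr_samples).
 */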
/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

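/*
 * Called at request completion time: compute the I/O latency (clamped to
 * zero if the clock went backwards), feed blk-throttle, and add the sample
 * to the matching per-CPU bucket of each active callback.
 */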
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

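/*
 * Timer expiry: fold every online CPU's buckets into cb->stat[], reset the
 * per-CPU copies for the next window, then hand the aggregate to the
 * owner's timer_fn.
 */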
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

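/*
 * Allocate a callback with @buckets stat buckets. @bucket_fn maps a request
 * to a bucket (a negative return means "skip this request"); @timer_fn runs
 * when the aggregation window expires. The callback starts inactive.
 */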
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

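/*
 * Reset the per-CPU buckets for every possible CPU, then publish the
 * callback on the queue's RCU list and turn on stats collection.
 */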
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

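/*
 * Unlink the callback; stats collection is switched off once no callbacks
 * and no accounting users remain. del_timer_sync() ensures the timer isn't
 * still running when we return.
 */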
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

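/*
 * Freeing is deferred through RCU so a concurrent blk_stat_add() walker
 * can safely finish with the callback.
 */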
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

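/*
 * The accounting count lets users keep QUEUE_FLAG_STATS (and hence request
 * stat recording) enabled without registering a callback of their own.
 */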
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++)
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}

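/*
 * Lazily allocate the poll-stat buckets on first use; cmpxchg() resolves
 * the race where two pollers allocate concurrently, and the loser frees
 * its copy.
 */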
bool blk_stats_alloc_enable(struct request_queue *q)
{
	struct blk_rq_stat *poll_stat;

	poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
				GFP_ATOMIC);
	if (!poll_stat)
		return false;

	if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
		kfree(poll_stat);
		return true;
	}

	blk_stat_add_callback(q, q->poll_cb);
	return true;
}
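
For orientation, here is a minimal sketch of how a consumer drives this API, modeled on in-tree users such as blk-wbt. The example_ names, the two-bucket read/write split, and the 100 ms window are illustrative assumptions, not part of the kernel:

/* Hypothetical consumer: tracks read/write completion latency. */
static int example_bucket_fn(const struct request *rq)
{
	/* bucket 0 = reads, bucket 1 = writes */
	return op_is_write(req_op(rq)) ? 1 : 0;
}

static void example_timer_fn(struct blk_stat_callback *cb)
{
	/* cb->stat[] holds the window aggregated by blk_stat_timer_fn() */
	pr_info("read mean %llu ns, write mean %llu ns\n",
		cb->stat[0].mean, cb->stat[1].mean);
	blk_stat_activate_msecs(cb, 100);	/* re-arm for the next window */
}

static int example_attach(struct request_queue *q)
{
	struct blk_stat_callback *cb;

	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
				     2, NULL);
	if (!cb)
		return -ENOMEM;
	blk_stat_add_callback(q, cb);
	blk_stat_activate_msecs(cb, 100);	/* arm the first window */
	return 0;
}

Teardown would be the reverse: blk_stat_remove_callback() followed by blk_stat_free_callback().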
block/blk-stat.c, v4.17
 
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	u64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

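/*
 * In this version accounting is a one-way latch: once a user enables it,
 * QUEUE_FLAG_STATS stays set until the queue is torn down (there is no
 * disable counterpart yet).
 */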
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}