// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};

#define STAT_PRECISE_TIMESTAMPS 1

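/*
 * Last position seen per CPU, used to detect whether a bio directly
 * follows the previous one with the same read/write direction (this is
 * what the "merged" flag in dm_stats_account_io() reports).
 */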
struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR 4
#define DM_STATS_VMALLOC_FACTOR 2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

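/*
 * Allocations made with dm_kvzalloc() must be released with dm_kvfree(),
 * passing the same size, so that the total tracked in
 * shared_memory_amount stays balanced.
 */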
static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}

static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */
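	/*
	 * For example, a region might be listed as (hypothetical values):
	 *   0: 0+2097152 512 my_program my_aux
	 * optionally followed by "precise_timestamps" and "histogram:..."
	 * when those features are enabled.
	 */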

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

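/*
 * Charge the time elapsed since the last call to the entry's in-flight
 * counters: io_ticks[READ/WRITE] advances while at least one request of
 * that type is in flight, io_ticks_total while any request is, and
 * time_in_queue is weighted by the number of in-flight requests. This
 * mirrors the block layer's part_round_stats_single().
 */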
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable. On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
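			/*
			 * Binary search for the histogram bucket: bucket
			 * lo ends up covering durations in
			 * [histogram_boundaries[lo - 1],
			 *  histogram_boundaries[lo]), with the first and
			 * last buckets open-ended.
			 */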
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration)
					hi = mid;
				else
					lo = mid;
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

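/*
 * Clip the bio to the region and walk it one step-sized area at a
 * time, charging each fragment to the matching entry. A bio that
 * crosses area boundaries is accounted as a fragment in every area it
 * touches.
 */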
static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration_jiffies,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == READ_ONCE(last->last_sector) &&
			 ((bi_rw == WRITE) ==
			  (READ_ONCE(last->last_rw) == WRITE)));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	}

	rcu_read_lock();

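	/*
	 * For precise regions, stats_aux->duration_ns holds the start
	 * timestamp while the bio is in flight and is converted to the
	 * elapsed nanoseconds on completion. This is done at most once
	 * even when several precise regions are registered.
	 */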
	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			if (!end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get());
			else
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

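/*
 * Sum the per-CPU counters for entry x into shared->tmp. The totals
 * are approximate: each counter is sampled with READ_ONCE while other
 * CPUs may still be updating it.
 */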
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

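/*
 * Clearing works by subtracting the totals gathered in shared->tmp
 * from the local CPU's counters rather than zeroing every CPU, so
 * events counted while the totals were being collected are not lost.
 */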
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msecs, but works for 64-bit values: the value
 * is converted in 22-bit chunks so that each individual
 * jiffies_to_msecs() call operates comfortably within 32 bits. (When
 * j >= 2^44 the middle branch has necessarily run, so mult is
 * initialized before the last branch uses it.)
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */
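	/*
	 * The counters are, in order: read ios, read merges, read
	 * sectors, read ticks, write ios, write merges, write sectors,
	 * write ticks, in-flight ios, io_ticks_total, time_in_queue,
	 * read io_ticks, write io_ticks; histogram buckets follow when
	 * configured.
	 */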

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
			}
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

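/*
 * Parse a comma-separated list of strictly increasing histogram
 * boundaries, e.g. "10,100,1000". The boundaries are compared against
 * durations in milliseconds, or in nanoseconds when the region uses
 * precise_timestamps.
 */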
static int parse_histogram(const char *h, unsigned *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc_array(*n_histogram_entries,
					      sizeof(unsigned long long),
					      GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;
		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}

static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;
	unsigned stat_flags = 0;

	unsigned n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;

	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned feature_args;

	/*
	 * Input format:
	 *   <range> <step> [<number_of_optional_arguments> <optional_arguments>] [<program_id> [<aux_data>]]
	 */
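	/*
	 * For example (hypothetical names and values):
	 *   @stats_create - /100 2 precise_timestamps histogram:1,10,100 my_prog my_aux
	 * creates a region covering the whole device, split into 100
	 * areas, with precise timestamps and a latency histogram.
	 */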

	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			return -EINVAL;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked). So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}

static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");