drivers/md/dm-stats.c, v4.6
   1#include <linux/errno.h>
   2#include <linux/numa.h>
   3#include <linux/slab.h>
   4#include <linux/rculist.h>
   5#include <linux/threads.h>
   6#include <linux/preempt.h>
   7#include <linux/irqflags.h>
   8#include <linux/vmalloc.h>
   9#include <linux/mm.h>
  10#include <linux/module.h>
  11#include <linux/device-mapper.h>
  12
  13#include "dm.h"
  14#include "dm-stats.h"
  15
  16#define DM_MSG_PREFIX "stats"
  17
  18static int dm_stat_need_rcu_barrier;
  19
  20/*
  21 * Using 64-bit values to avoid overflow (which is a
  22 * problem that block/genhd.c's IO accounting has).
  23 */
  24struct dm_stat_percpu {
  25	unsigned long long sectors[2];
  26	unsigned long long ios[2];
  27	unsigned long long merges[2];
  28	unsigned long long ticks[2];
  29	unsigned long long io_ticks[2];
  30	unsigned long long io_ticks_total;
  31	unsigned long long time_in_queue;
  32	unsigned long long *histogram;
  33};
  34
  35struct dm_stat_shared {
  36	atomic_t in_flight[2];
  37	unsigned long long stamp;
  38	struct dm_stat_percpu tmp;
  39};
  40
  41struct dm_stat {
  42	struct list_head list_entry;
  43	int id;
  44	unsigned stat_flags;
  45	size_t n_entries;
  46	sector_t start;
  47	sector_t end;
  48	sector_t step;
  49	unsigned n_histogram_entries;
  50	unsigned long long *histogram_boundaries;
  51	const char *program_id;
  52	const char *aux_data;
  53	struct rcu_head rcu_head;
  54	size_t shared_alloc_size;
  55	size_t percpu_alloc_size;
  56	size_t histogram_alloc_size;
  57	struct dm_stat_percpu *stat_percpu[NR_CPUS];
  58	struct dm_stat_shared stat_shared[0];
  59};
  60
  61#define STAT_PRECISE_TIMESTAMPS		1
  62
  63struct dm_stats_last_position {
  64	sector_t last_sector;
  65	unsigned last_rw;
  66};
  67
  68/*
  69 * A typo on the command line could possibly make the kernel run out of memory
  70 * and crash. To prevent the crash we account all used memory. We fail if we
  71 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
  72 */
  73#define DM_STATS_MEMORY_FACTOR		4
  74#define DM_STATS_VMALLOC_FACTOR		2
  75
  76static DEFINE_SPINLOCK(shared_memory_lock);
  77
  78static unsigned long shared_memory_amount;
  79
  80static bool __check_shared_memory(size_t alloc_size)
  81{
  82	size_t a;
  83
  84	a = shared_memory_amount + alloc_size;
  85	if (a < shared_memory_amount)
  86		return false;
  87	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
  88		return false;
  89#ifdef CONFIG_MMU
  90	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
  91		return false;
  92#endif
  93	return true;
  94}
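
The helper above rejects a request if adding it would wrap the running total, push usage past 1/4 of RAM, or past 1/2 of the vmalloc arena. A minimal userspace sketch of the same three-way check (PAGE_SHIFT aside, the limits below are made-up stand-ins for the kernel's totalram_pages and VMALLOC range):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define TOTAL_RAM_PAGES	(1UL << 20)	/* pretend 4 GiB of RAM */
#define VMALLOC_SPACE	(1UL << 30)	/* pretend 1 GiB of vmalloc space */
#define MEMORY_FACTOR	4
#define VMALLOC_FACTOR	2

static size_t used;			/* mirrors shared_memory_amount */

static bool check_limit(size_t alloc_size)
{
	size_t a = used + alloc_size;

	if (a < used)
		return false;		/* the sum wrapped: overflow */
	if (a >> PAGE_SHIFT > TOTAL_RAM_PAGES / MEMORY_FACTOR)
		return false;		/* would exceed 1/4 of RAM */
	if (a > VMALLOC_SPACE / VMALLOC_FACTOR)
		return false;		/* would exceed 1/2 of vmalloc space */
	return true;
}

int main(void)
{
	printf("1 MiB: %d\n", check_limit(1 << 20));	/* accepted */
	printf("huge:  %d\n", check_limit((size_t)-1));	/* rejected */
	return 0;
}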
  95
  96static bool check_shared_memory(size_t alloc_size)
  97{
  98	bool ret;
  99
 100	spin_lock_irq(&shared_memory_lock);
 101
 102	ret = __check_shared_memory(alloc_size);
 103
 104	spin_unlock_irq(&shared_memory_lock);
 105
 106	return ret;
 107}
 108
 109static bool claim_shared_memory(size_t alloc_size)
 110{
 111	spin_lock_irq(&shared_memory_lock);
 112
 113	if (!__check_shared_memory(alloc_size)) {
 114		spin_unlock_irq(&shared_memory_lock);
 115		return false;
 116	}
 117
 118	shared_memory_amount += alloc_size;
 119
 120	spin_unlock_irq(&shared_memory_lock);
 121
 122	return true;
 123}
 124
 125static void free_shared_memory(size_t alloc_size)
 126{
 127	unsigned long flags;
 128
 129	spin_lock_irqsave(&shared_memory_lock, flags);
 130
 131	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
 132		spin_unlock_irqrestore(&shared_memory_lock, flags);
 133		DMCRIT("Memory usage accounting bug.");
 134		return;
 135	}
 136
 137	shared_memory_amount -= alloc_size;
 138
 139	spin_unlock_irqrestore(&shared_memory_lock, flags);
 140}
 141
 142static void *dm_kvzalloc(size_t alloc_size, int node)
 143{
 144	void *p;
 145
 146	if (!claim_shared_memory(alloc_size))
 147		return NULL;
 148
 149	if (alloc_size <= KMALLOC_MAX_SIZE) {
 150		p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
 151		if (p)
 152			return p;
 153	}
 154	p = vzalloc_node(alloc_size, node);
 155	if (p)
 156		return p;
 157
 158	free_shared_memory(alloc_size);
 159
 160	return NULL;
 161}
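
dm_kvzalloc tries a physically contiguous kzalloc first (with __GFP_NORETRY | __GFP_NOWARN so a failure is cheap and quiet) and falls back to vzalloc; dm_kvfree relies on kvfree() accepting either kind of address, and the accounting claim/release brackets both paths. A userspace analog of the try-then-fall-back control flow (calloc stands in for both allocators, so only the shape carries over):

#include <stdio.h>
#include <stdlib.h>

#define SMALL_ALLOC_MAX	(128 * 1024)	/* illustrative KMALLOC_MAX_SIZE stand-in */

static void *small_zalloc(size_t n)	/* plays the kzalloc role */
{
	return n <= SMALL_ALLOC_MAX ? calloc(1, n) : NULL;
}

static void *big_zalloc(size_t n)	/* plays the vzalloc role */
{
	return calloc(1, n);
}

static void *kv_zalloc(size_t n)
{
	void *p = small_zalloc(n);	/* cheap contiguous attempt first */

	if (p)
		return p;
	return big_zalloc(n);		/* fall back for large requests */
}

int main(void)
{
	void *a = kv_zalloc(4096);
	void *b = kv_zalloc(4 << 20);

	printf("small=%s big=%s\n", a ? "ok" : "fail", b ? "ok" : "fail");
	free(a);
	free(b);
	return 0;
}

The v4.17 copy of this file, further down, folds the same two-step dance into a single kvzalloc_node() call.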
 162
 163static void dm_kvfree(void *ptr, size_t alloc_size)
 164{
 165	if (!ptr)
 166		return;
 167
 168	free_shared_memory(alloc_size);
 169
 170	kvfree(ptr);
 171}
 172
 173static void dm_stat_free(struct rcu_head *head)
 174{
 175	int cpu;
 176	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
 177
 
 178	kfree(s->program_id);
 179	kfree(s->aux_data);
 180	for_each_possible_cpu(cpu) {
 181		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
 182		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
 183	}
 184	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
 185	dm_kvfree(s, s->shared_alloc_size);
 186}
 187
 188static int dm_stat_in_flight(struct dm_stat_shared *shared)
 189{
 190	return atomic_read(&shared->in_flight[READ]) +
 191	       atomic_read(&shared->in_flight[WRITE]);
 192}
 193
 194void dm_stats_init(struct dm_stats *stats)
 195{
 196	int cpu;
 197	struct dm_stats_last_position *last;
 198
 199	mutex_init(&stats->mutex);
 200	INIT_LIST_HEAD(&stats->list);
 201	stats->last = alloc_percpu(struct dm_stats_last_position);
 202	for_each_possible_cpu(cpu) {
 203		last = per_cpu_ptr(stats->last, cpu);
 204		last->last_sector = (sector_t)ULLONG_MAX;
 205		last->last_rw = UINT_MAX;
 206	}
 207}
 208
 209void dm_stats_cleanup(struct dm_stats *stats)
 210{
 211	size_t ni;
 212	struct dm_stat *s;
 213	struct dm_stat_shared *shared;
 214
 215	while (!list_empty(&stats->list)) {
 216		s = container_of(stats->list.next, struct dm_stat, list_entry);
 217		list_del(&s->list_entry);
 218		for (ni = 0; ni < s->n_entries; ni++) {
 219			shared = &s->stat_shared[ni];
 220			if (WARN_ON(dm_stat_in_flight(shared))) {
 221				DMCRIT("leaked in-flight counter at index %lu "
 222				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
 223				       (unsigned long)ni,
 224				       (unsigned long long)s->start,
 225				       (unsigned long long)s->end,
 226				       (unsigned long long)s->step,
 227				       atomic_read(&shared->in_flight[READ]),
 228				       atomic_read(&shared->in_flight[WRITE]));
 229			}
 230		}
 231		dm_stat_free(&s->rcu_head);
 232	}
  233	free_percpu(stats->last);
  234}
 235
 236static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 237			   sector_t step, unsigned stat_flags,
 238			   unsigned n_histogram_entries,
 239			   unsigned long long *histogram_boundaries,
 240			   const char *program_id, const char *aux_data,
 241			   void (*suspend_callback)(struct mapped_device *),
 242			   void (*resume_callback)(struct mapped_device *),
 243			   struct mapped_device *md)
 244{
 245	struct list_head *l;
 246	struct dm_stat *s, *tmp_s;
 247	sector_t n_entries;
 248	size_t ni;
 249	size_t shared_alloc_size;
 250	size_t percpu_alloc_size;
 251	size_t histogram_alloc_size;
 252	struct dm_stat_percpu *p;
 253	int cpu;
 254	int ret_id;
 255	int r;
 256
 257	if (end < start || !step)
 258		return -EINVAL;
 259
 260	n_entries = end - start;
 261	if (dm_sector_div64(n_entries, step))
 262		n_entries++;
 263
 264	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
 265		return -EOVERFLOW;
 266
 267	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
 268	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
 269		return -EOVERFLOW;
 270
 271	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
 272	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
 273		return -EOVERFLOW;
 274
 275	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
 276	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
 277		return -EOVERFLOW;
 278
 279	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
 280				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
 281		return -ENOMEM;
 282
 283	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
 284	if (!s)
 285		return -ENOMEM;
 286
 287	s->stat_flags = stat_flags;
 288	s->n_entries = n_entries;
 289	s->start = start;
 290	s->end = end;
 291	s->step = step;
 292	s->shared_alloc_size = shared_alloc_size;
 293	s->percpu_alloc_size = percpu_alloc_size;
 294	s->histogram_alloc_size = histogram_alloc_size;
 295
 296	s->n_histogram_entries = n_histogram_entries;
 297	s->histogram_boundaries = kmemdup(histogram_boundaries,
 298					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
 299	if (!s->histogram_boundaries) {
 300		r = -ENOMEM;
 301		goto out;
 302	}
 303
 304	s->program_id = kstrdup(program_id, GFP_KERNEL);
 305	if (!s->program_id) {
 306		r = -ENOMEM;
 307		goto out;
 308	}
 309	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
 310	if (!s->aux_data) {
 311		r = -ENOMEM;
 312		goto out;
 313	}
 314
 315	for (ni = 0; ni < n_entries; ni++) {
 316		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
 317		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
 318	}
 319
 320	if (s->n_histogram_entries) {
 321		unsigned long long *hi;
 322		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
 323		if (!hi) {
 324			r = -ENOMEM;
 325			goto out;
 326		}
 327		for (ni = 0; ni < n_entries; ni++) {
 328			s->stat_shared[ni].tmp.histogram = hi;
 329			hi += s->n_histogram_entries + 1;
 330		}
 331	}
 332
 333	for_each_possible_cpu(cpu) {
 334		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
 335		if (!p) {
 336			r = -ENOMEM;
 337			goto out;
 338		}
 339		s->stat_percpu[cpu] = p;
 340		if (s->n_histogram_entries) {
 341			unsigned long long *hi;
 342			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
 343			if (!hi) {
 344				r = -ENOMEM;
 345				goto out;
 346			}
 347			for (ni = 0; ni < n_entries; ni++) {
 348				p[ni].histogram = hi;
 349				hi += s->n_histogram_entries + 1;
 350			}
 351		}
 352	}
 353
 354	/*
 355	 * Suspend/resume to make sure there is no i/o in flight,
 356	 * so that newly created statistics will be exact.
 357	 *
 358	 * (note: we couldn't suspend earlier because we must not
 359	 * allocate memory while suspended)
 360	 */
 361	suspend_callback(md);
 362
 363	mutex_lock(&stats->mutex);
 364	s->id = 0;
 365	list_for_each(l, &stats->list) {
 366		tmp_s = container_of(l, struct dm_stat, list_entry);
 367		if (WARN_ON(tmp_s->id < s->id)) {
 368			r = -EINVAL;
 369			goto out_unlock_resume;
 370		}
 371		if (tmp_s->id > s->id)
 372			break;
 373		if (unlikely(s->id == INT_MAX)) {
 374			r = -ENFILE;
 375			goto out_unlock_resume;
 376		}
 377		s->id++;
 378	}
 379	ret_id = s->id;
 380	list_add_tail_rcu(&s->list_entry, l);
 381	mutex_unlock(&stats->mutex);
 382
 383	resume_callback(md);
 384
 385	return ret_id;
 386
 387out_unlock_resume:
 388	mutex_unlock(&stats->mutex);
 389	resume_callback(md);
 390out:
 391	dm_stat_free(&s->rcu_head);
 392	return r;
 393}
 394
 395static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
 396{
 397	struct dm_stat *s;
 398
 399	list_for_each_entry(s, &stats->list, list_entry) {
 400		if (s->id > id)
 401			break;
 402		if (s->id == id)
 403			return s;
 404	}
 405
 406	return NULL;
 407}
 408
 409static int dm_stats_delete(struct dm_stats *stats, int id)
 410{
 411	struct dm_stat *s;
 412	int cpu;
 413
 414	mutex_lock(&stats->mutex);
 415
 416	s = __dm_stats_find(stats, id);
 417	if (!s) {
 418		mutex_unlock(&stats->mutex);
 419		return -ENOENT;
 420	}
 421
 422	list_del_rcu(&s->list_entry);
 423	mutex_unlock(&stats->mutex);
 424
 425	/*
 426	 * vfree can't be called from RCU callback
 427	 */
 428	for_each_possible_cpu(cpu)
 429		if (is_vmalloc_addr(s->stat_percpu) ||
 430		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
 431			goto do_sync_free;
 432	if (is_vmalloc_addr(s) ||
 433	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
 434do_sync_free:
 435		synchronize_rcu_expedited();
 436		dm_stat_free(&s->rcu_head);
 437	} else {
 438		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
 439		call_rcu(&s->rcu_head, dm_stat_free);
 440	}
 441	return 0;
 442}
 443
 444static int dm_stats_list(struct dm_stats *stats, const char *program,
 445			 char *result, unsigned maxlen)
 446{
 447	struct dm_stat *s;
 448	sector_t len;
 449	unsigned sz = 0;
 450
 451	/*
 452	 * Output format:
 453	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
 454	 */
 455
 456	mutex_lock(&stats->mutex);
 457	list_for_each_entry(s, &stats->list, list_entry) {
 458		if (!program || !strcmp(program, s->program_id)) {
 459			len = s->end - s->start;
 460			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
 461				(unsigned long long)s->start,
 462				(unsigned long long)len,
 463				(unsigned long long)s->step,
 464				s->program_id,
 465				s->aux_data);
 466			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
 467				DMEMIT(" precise_timestamps");
 468			if (s->n_histogram_entries) {
 469				unsigned i;
 470				DMEMIT(" histogram:");
 471				for (i = 0; i < s->n_histogram_entries; i++) {
 472					if (i)
 473						DMEMIT(",");
 474					DMEMIT("%llu", s->histogram_boundaries[i]);
 475				}
 476			}
 477			DMEMIT("\n");
 478		}
 479	}
 480	mutex_unlock(&stats->mutex);
 481
 482	return 1;
 483}
 484
 485static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
 486			  struct dm_stat_percpu *p)
 487{
 488	/*
 489	 * This is racy, but so is part_round_stats_single.
 490	 */
 491	unsigned long long now, difference;
 492	unsigned in_flight_read, in_flight_write;
 493
 494	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
 495		now = jiffies;
 496	else
 497		now = ktime_to_ns(ktime_get());
 498
 499	difference = now - shared->stamp;
 500	if (!difference)
 501		return;
 502
 503	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
 504	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
 505	if (in_flight_read)
 506		p->io_ticks[READ] += difference;
 507	if (in_flight_write)
 508		p->io_ticks[WRITE] += difference;
 509	if (in_flight_read + in_flight_write) {
 510		p->io_ticks_total += difference;
 511		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
 512	}
 513	shared->stamp = now;
 514}
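
Each call to dm_stat_round charges the time elapsed since the last stamp: io_ticks for a direction advances while that direction has I/O in flight, and time_in_queue advances weighted by the number of in-flight requests, so two requests waiting 10 ticks add 20. A standalone sketch of that bookkeeping, with plain integers in place of atomics and jiffies:

#include <stdio.h>

struct counters {
	unsigned long long stamp;		/* last time accounted up to */
	unsigned long long io_ticks;		/* time with any I/O in flight */
	unsigned long long time_in_queue;	/* in_flight-weighted time */
};

static void round_stats(struct counters *c, unsigned in_flight,
			unsigned long long now)
{
	unsigned long long diff = now - c->stamp;

	if (!diff)
		return;
	if (in_flight) {
		c->io_ticks += diff;
		c->time_in_queue += (unsigned long long)in_flight * diff;
	}
	c->stamp = now;
}

int main(void)
{
	struct counters c = { 0 };

	round_stats(&c, 2, 10);	/* two requests in flight for 10 ticks */
	round_stats(&c, 0, 15);	/* 5 idle ticks: nothing accumulates */
	printf("io_ticks=%llu time_in_queue=%llu\n",
	       c.io_ticks, c.time_in_queue);	/* 10 and 20 */
	return 0;
}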
 515
 516static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
 517			      unsigned long bi_rw, sector_t len,
 518			      struct dm_stats_aux *stats_aux, bool end,
 519			      unsigned long duration_jiffies)
 520{
 521	unsigned long idx = bi_rw & REQ_WRITE;
 522	struct dm_stat_shared *shared = &s->stat_shared[entry];
 523	struct dm_stat_percpu *p;
 524
 525	/*
 526	 * For strict correctness we should use local_irq_save/restore
 527	 * instead of preempt_disable/enable.
 528	 *
 529	 * preempt_disable/enable is racy if the driver finishes bios
 530	 * from non-interrupt context as well as from interrupt context
  531 * or from two or more different interrupts.
 532	 *
 533	 * On 64-bit architectures the race only results in not counting some
 534	 * events, so it is acceptable.  On 32-bit architectures the race could
 535	 * cause the counter going off by 2^32, so we need to do proper locking
 536	 * there.
 537	 *
 538	 * part_stat_lock()/part_stat_unlock() have this race too.
 539	 */
 540#if BITS_PER_LONG == 32
 541	unsigned long flags;
 542	local_irq_save(flags);
 543#else
 544	preempt_disable();
 545#endif
 546	p = &s->stat_percpu[smp_processor_id()][entry];
 547
 548	if (!end) {
 549		dm_stat_round(s, shared, p);
 550		atomic_inc(&shared->in_flight[idx]);
 551	} else {
 552		unsigned long long duration;
 553		dm_stat_round(s, shared, p);
 554		atomic_dec(&shared->in_flight[idx]);
 555		p->sectors[idx] += len;
 556		p->ios[idx] += 1;
 557		p->merges[idx] += stats_aux->merged;
 558		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
 559			p->ticks[idx] += duration_jiffies;
 560			duration = jiffies_to_msecs(duration_jiffies);
 561		} else {
 562			p->ticks[idx] += stats_aux->duration_ns;
 563			duration = stats_aux->duration_ns;
 564		}
 565		if (s->n_histogram_entries) {
 566			unsigned lo = 0, hi = s->n_histogram_entries + 1;
 567			while (lo + 1 < hi) {
 568				unsigned mid = (lo + hi) / 2;
 569				if (s->histogram_boundaries[mid - 1] > duration) {
 570					hi = mid;
 571				} else {
 572					lo = mid;
 573				}
 574
 575			}
 576			p->histogram[lo]++;
 577		}
 578	}
 579
 580#if BITS_PER_LONG == 32
 581	local_irq_restore(flags);
 582#else
 583	preempt_enable();
 584#endif
 585}
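
The histogram branch above is a plain binary search: n ascending boundaries split the time axis into n + 1 buckets, and a duration equal to a boundary lands in the bucket above it. The same search, extracted and testable on its own:

#include <stdio.h>

/* Return the bucket index for a duration, given n ascending boundaries
 * that split the time axis into n + 1 buckets. */
static unsigned histogram_bucket(const unsigned long long *boundaries,
				 unsigned n, unsigned long long duration)
{
	unsigned lo = 0, hi = n + 1;

	while (lo + 1 < hi) {
		unsigned mid = (lo + hi) / 2;

		if (boundaries[mid - 1] > duration)
			hi = mid;
		else
			lo = mid;
	}
	return lo;
}

int main(void)
{
	unsigned long long b[] = { 10, 100, 1000 };	/* 4 buckets */

	printf("%u %u %u %u\n",
	       histogram_bucket(b, 3, 5),	/* 0: below 10 */
	       histogram_bucket(b, 3, 10),	/* 1: [10, 100) */
	       histogram_bucket(b, 3, 999),	/* 2: [100, 1000) */
	       histogram_bucket(b, 3, 5000));	/* 3: 1000 and up */
	return 0;
}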
 586
 587static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
 588			  sector_t bi_sector, sector_t end_sector,
 589			  bool end, unsigned long duration_jiffies,
 590			  struct dm_stats_aux *stats_aux)
 591{
 592	sector_t rel_sector, offset, todo, fragment_len;
 593	size_t entry;
 594
 595	if (end_sector <= s->start || bi_sector >= s->end)
 596		return;
 597	if (unlikely(bi_sector < s->start)) {
 598		rel_sector = 0;
 599		todo = end_sector - s->start;
 600	} else {
 601		rel_sector = bi_sector - s->start;
 602		todo = end_sector - bi_sector;
 603	}
 604	if (unlikely(end_sector > s->end))
 605		todo -= (end_sector - s->end);
 606
 607	offset = dm_sector_div64(rel_sector, s->step);
 608	entry = rel_sector;
 609	do {
 610		if (WARN_ON_ONCE(entry >= s->n_entries)) {
 611			DMCRIT("Invalid area access in region id %d", s->id);
 612			return;
 613		}
 614		fragment_len = todo;
 615		if (fragment_len > s->step - offset)
 616			fragment_len = s->step - offset;
 617		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
 618				  stats_aux, end, duration_jiffies);
 619		todo -= fragment_len;
 620		entry++;
 621		offset = 0;
 622	} while (unlikely(todo != 0));
 623}
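
__dm_stat_bio first clips the bio to [s->start, s->end), then walks it one step-sized area at a time: only the first fragment can begin part-way into an area (offset), and the last may be short. A sketch of the same splitting that prints each (entry, fragment length) pair:

#include <stdio.h>

/* Split the clipped range [rel_sector, rel_sector + todo) into fragments
 * that never cross a step-sized area boundary. */
static void split(unsigned long long rel_sector, unsigned long long todo,
		  unsigned long long step)
{
	unsigned long long entry = rel_sector / step;
	unsigned long long offset = rel_sector % step;

	do {
		unsigned long long frag = todo;

		if (frag > step - offset)
			frag = step - offset;
		printf("entry %llu: %llu sectors\n", entry, frag);
		todo -= frag;
		entry++;
		offset = 0;	/* later fragments start on an area boundary */
	} while (todo);
}

int main(void)
{
	/* A 20-sector I/O starting 3 sectors into area 0, with step = 8:
	 * expect fragments of 5, 8 and 7 sectors in entries 0, 1, 2. */
	split(3, 20, 8);
	return 0;
}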
 624
 625void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 626			 sector_t bi_sector, unsigned bi_sectors, bool end,
 627			 unsigned long duration_jiffies,
 628			 struct dm_stats_aux *stats_aux)
 629{
 630	struct dm_stat *s;
 631	sector_t end_sector;
 632	struct dm_stats_last_position *last;
 633	bool got_precise_time;
 634
 635	if (unlikely(!bi_sectors))
 636		return;
 637
 638	end_sector = bi_sector + bi_sectors;
 639
 640	if (!end) {
 641		/*
 642		 * A race condition can at worst result in the merged flag being
 643		 * misrepresented, so we don't have to disable preemption here.
 644		 */
 645		last = raw_cpu_ptr(stats->last);
 646		stats_aux->merged =
  647			(bi_sector == ACCESS_ONCE(last->last_sector) &&
  648			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
  649			  (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
  650
 651		ACCESS_ONCE(last->last_sector) = end_sector;
 652		ACCESS_ONCE(last->last_rw) = bi_rw;
 653	}
 654
 655	rcu_read_lock();
 656
 657	got_precise_time = false;
 658	list_for_each_entry_rcu(s, &stats->list, list_entry) {
 659		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
 660			if (!end)
 661				stats_aux->duration_ns = ktime_to_ns(ktime_get());
 662			else
 663				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
 664			got_precise_time = true;
 665		}
 666		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
 667	}
 668
 669	rcu_read_unlock();
 670}
 671
 672static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
 673						   struct dm_stat *s, size_t x)
 674{
 675	int cpu;
 676	struct dm_stat_percpu *p;
 677
 678	local_irq_disable();
 679	p = &s->stat_percpu[smp_processor_id()][x];
 680	dm_stat_round(s, shared, p);
 681	local_irq_enable();
 682
 683	shared->tmp.sectors[READ] = 0;
 684	shared->tmp.sectors[WRITE] = 0;
 685	shared->tmp.ios[READ] = 0;
 686	shared->tmp.ios[WRITE] = 0;
 687	shared->tmp.merges[READ] = 0;
 688	shared->tmp.merges[WRITE] = 0;
 689	shared->tmp.ticks[READ] = 0;
 690	shared->tmp.ticks[WRITE] = 0;
 691	shared->tmp.io_ticks[READ] = 0;
 692	shared->tmp.io_ticks[WRITE] = 0;
 693	shared->tmp.io_ticks_total = 0;
 694	shared->tmp.time_in_queue = 0;
 695
 696	if (s->n_histogram_entries)
 697		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));
 698
 699	for_each_possible_cpu(cpu) {
 700		p = &s->stat_percpu[cpu][x];
 701		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
 702		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
 703		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
 704		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
 705		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
 706		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
 707		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
 708		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
 709		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
 710		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
 711		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
 712		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
 713		if (s->n_histogram_entries) {
 714			unsigned i;
 715			for (i = 0; i < s->n_histogram_entries + 1; i++)
 716				shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
 717		}
 718	}
 719}
 720
 721static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
 722			    bool init_tmp_percpu_totals)
 723{
 724	size_t x;
 725	struct dm_stat_shared *shared;
 726	struct dm_stat_percpu *p;
 727
 728	for (x = idx_start; x < idx_end; x++) {
 729		shared = &s->stat_shared[x];
 730		if (init_tmp_percpu_totals)
 731			__dm_stat_init_temporary_percpu_totals(shared, s, x);
 732		local_irq_disable();
 733		p = &s->stat_percpu[smp_processor_id()][x];
 734		p->sectors[READ] -= shared->tmp.sectors[READ];
 735		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
 736		p->ios[READ] -= shared->tmp.ios[READ];
 737		p->ios[WRITE] -= shared->tmp.ios[WRITE];
 738		p->merges[READ] -= shared->tmp.merges[READ];
 739		p->merges[WRITE] -= shared->tmp.merges[WRITE];
 740		p->ticks[READ] -= shared->tmp.ticks[READ];
 741		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
 742		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
 743		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
 744		p->io_ticks_total -= shared->tmp.io_ticks_total;
 745		p->time_in_queue -= shared->tmp.time_in_queue;
 746		local_irq_enable();
 747		if (s->n_histogram_entries) {
 748			unsigned i;
 749			for (i = 0; i < s->n_histogram_entries + 1; i++) {
 750				local_irq_disable();
 751				p = &s->stat_percpu[smp_processor_id()][x];
 752				p->histogram[i] -= shared->tmp.histogram[i];
 753				local_irq_enable();
 754			}
 755		}
 756	}
 757}
 758
 759static int dm_stats_clear(struct dm_stats *stats, int id)
 760{
 761	struct dm_stat *s;
 762
 763	mutex_lock(&stats->mutex);
 764
 765	s = __dm_stats_find(stats, id);
 766	if (!s) {
 767		mutex_unlock(&stats->mutex);
 768		return -ENOENT;
 769	}
 770
 771	__dm_stat_clear(s, 0, s->n_entries, true);
 772
 773	mutex_unlock(&stats->mutex);
 774
 775	return 1;
 776}
 777
 778/*
  779 * This is like jiffies_to_msecs, but works for 64-bit values.
 780 */
 781static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
 782{
 783	unsigned long long result;
 784	unsigned mult;
 785
 786	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
 787		return j;
 788
 789	result = 0;
 790	if (j)
 791		result = jiffies_to_msecs(j & 0x3fffff);
 792	if (j >= 1 << 22) {
 793		mult = jiffies_to_msecs(1 << 22);
 794		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
 795	}
 796	if (j >= 1ULL << 44)
 797		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
 798
 799	return result;
 800}
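
jiffies_to_msecs only takes a 32-bit count, so the helper above converts 64 bits in 22-bit chunks, scaling the upper chunks by mult = jiffies_to_msecs(1 << 22). A userspace rendering under an assumed HZ of 1000, where one jiffy is one millisecond and the chunked conversion reduces to the identity, which makes it easy to eyeball:

#include <stdio.h>

#define HZ 1000u	/* assumed tick rate: 1 jiffy == 1 ms */

static unsigned jiffies_to_msecs32(unsigned j)
{
	return (unsigned)((unsigned long long)j * 1000u / HZ);
}

/* 64-bit conversion in 22-bit chunks, mirroring dm_jiffies_to_msec64. */
static unsigned long long jiffies_to_msecs64(unsigned long long j)
{
	unsigned long long result = 0;
	unsigned mult = 0;

	if (j)
		result = jiffies_to_msecs32(j & 0x3fffff);
	if (j >= 1ULL << 22) {
		mult = jiffies_to_msecs32(1 << 22);
		result += (unsigned long long)mult *
			  jiffies_to_msecs32((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * mult *
			  jiffies_to_msecs32((unsigned)(j >> 44));
	return result;
}

int main(void)
{
	unsigned long long j = 123456789012345ULL;

	printf("%llu jiffies -> %llu ms\n", j, jiffies_to_msecs64(j));
	return 0;
}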
 801
 802static int dm_stats_print(struct dm_stats *stats, int id,
 803			  size_t idx_start, size_t idx_len,
 804			  bool clear, char *result, unsigned maxlen)
 805{
 806	unsigned sz = 0;
 807	struct dm_stat *s;
 808	size_t x;
 809	sector_t start, end, step;
 810	size_t idx_end;
 811	struct dm_stat_shared *shared;
 812
 813	/*
 814	 * Output format:
 815	 *   <start_sector>+<length> counters
 816	 */
 817
 818	mutex_lock(&stats->mutex);
 819
 820	s = __dm_stats_find(stats, id);
 821	if (!s) {
 822		mutex_unlock(&stats->mutex);
 823		return -ENOENT;
 824	}
 825
 826	idx_end = idx_start + idx_len;
 827	if (idx_end < idx_start ||
 828	    idx_end > s->n_entries)
 829		idx_end = s->n_entries;
 830
 831	if (idx_start > idx_end)
 832		idx_start = idx_end;
 833
 834	step = s->step;
 835	start = s->start + (step * idx_start);
 836
 837	for (x = idx_start; x < idx_end; x++, start = end) {
 838		shared = &s->stat_shared[x];
 839		end = start + step;
 840		if (unlikely(end > s->end))
 841			end = s->end;
 842
 843		__dm_stat_init_temporary_percpu_totals(shared, s, x);
 844
 845		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
 846		       (unsigned long long)start,
 847		       (unsigned long long)step,
 848		       shared->tmp.ios[READ],
 849		       shared->tmp.merges[READ],
 850		       shared->tmp.sectors[READ],
 851		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
 852		       shared->tmp.ios[WRITE],
 853		       shared->tmp.merges[WRITE],
 854		       shared->tmp.sectors[WRITE],
 855		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
 856		       dm_stat_in_flight(shared),
 857		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
 858		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
 859		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
 860		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
 861		if (s->n_histogram_entries) {
 862			unsigned i;
 863			for (i = 0; i < s->n_histogram_entries + 1; i++) {
 864				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
 865			}
 866		}
 867		DMEMIT("\n");
 868
 869		if (unlikely(sz + 1 >= maxlen))
 870			goto buffer_overflow;
 871	}
 872
 873	if (clear)
 874		__dm_stat_clear(s, idx_start, idx_end, false);
 875
 876buffer_overflow:
 877	mutex_unlock(&stats->mutex);
 878
 879	return 1;
 880}
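
Each line emitted above carries, per area: start+step, then ios/merges/sectors/ticks for reads, the same four for writes, the in-flight count, total io_ticks, time_in_queue, and io_ticks split by direction, optionally followed by histogram counts. A tiny formatter reproducing that shape for one made-up area:

#include <stdio.h>

int main(void)
{
	/* Field order mirrors the DMEMIT format string above. */
	printf("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
	       0ULL, 1024ULL,			/* area start and length, sectors */
	       15ULL, 3ULL, 120ULL, 40ULL,	/* read ios/merges/sectors/ticks */
	       7ULL, 1ULL, 56ULL, 25ULL,	/* write ios/merges/sectors/ticks */
	       0,				/* requests in flight */
	       60ULL, 65ULL,			/* io_ticks, time_in_queue */
	       40ULL, 20ULL);			/* io_ticks for reads, writes */
	return 0;
}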
 881
 882static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
 883{
 884	struct dm_stat *s;
 885	const char *new_aux_data;
 886
 887	mutex_lock(&stats->mutex);
 888
 889	s = __dm_stats_find(stats, id);
 890	if (!s) {
 891		mutex_unlock(&stats->mutex);
 892		return -ENOENT;
 893	}
 894
 895	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
 896	if (!new_aux_data) {
 897		mutex_unlock(&stats->mutex);
 898		return -ENOMEM;
 899	}
 900
 901	kfree(s->aux_data);
 902	s->aux_data = new_aux_data;
 903
 904	mutex_unlock(&stats->mutex);
 905
 906	return 0;
 907}
 908
 909static int parse_histogram(const char *h, unsigned *n_histogram_entries,
 910			   unsigned long long **histogram_boundaries)
 911{
 912	const char *q;
 913	unsigned n;
 914	unsigned long long last;
 915
 916	*n_histogram_entries = 1;
 917	for (q = h; *q; q++)
 918		if (*q == ',')
 919			(*n_histogram_entries)++;
 920
 921	*histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
 922	if (!*histogram_boundaries)
 923		return -ENOMEM;
 924
 925	n = 0;
 926	last = 0;
 927	while (1) {
 928		unsigned long long hi;
 929		int s;
 930		char ch;
 931		s = sscanf(h, "%llu%c", &hi, &ch);
  932		if (s < 1 || (s == 2 && ch != ','))
 933			return -EINVAL;
 934		if (hi <= last)
 935			return -EINVAL;
 936		last = hi;
 937		(*histogram_boundaries)[n] = hi;
 938		if (s == 1)
 939			return 0;
 940		h = strchr(h, ',') + 1;
 941		n++;
 942	}
 943}
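
parse_histogram turns a comma-separated, strictly increasing list such as "10,100,1000" into an array of boundaries (on the error paths the caller, message_stats_create, frees the partially built array). A standalone version enforcing the same rules, which also treats an sscanf EOF result as malformed input:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "10,100,1000" into a malloc'd array of strictly increasing
 * boundaries; return the count, or -1 on malformed input. */
static int parse_boundaries(const char *h, unsigned long long **out)
{
	unsigned n = 1, i = 0;
	unsigned long long last = 0, v;
	const char *q;
	char ch;

	for (q = h; *q; q++)
		if (*q == ',')
			n++;
	*out = malloc(n * sizeof(**out));
	if (!*out)
		return -1;

	while (1) {
		int s = sscanf(h, "%llu%c", &v, &ch);

		if (s < 1 || (s == 2 && ch != ',') || v <= last)
			break;			/* malformed or not increasing */
		last = v;
		(*out)[i++] = v;
		if (s == 1)
			return (int)i;		/* last number, no trailing comma */
		h = strchr(h, ',') + 1;
	}
	free(*out);
	*out = NULL;
	return -1;
}

int main(void)
{
	unsigned long long *b;
	int n = parse_boundaries("10,100,1000", &b);

	printf("n=%d first=%llu last=%llu\n", n, b[0], b[2]);
	free(b);
	printf("bad=%d\n", parse_boundaries("5,5", &b));	/* not increasing */
	return 0;
}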
 944
 945static int message_stats_create(struct mapped_device *md,
 946				unsigned argc, char **argv,
 947				char *result, unsigned maxlen)
 948{
 949	int r;
 950	int id;
 951	char dummy;
 952	unsigned long long start, end, len, step;
 953	unsigned divisor;
 954	const char *program_id, *aux_data;
 955	unsigned stat_flags = 0;
 956
 957	unsigned n_histogram_entries = 0;
 958	unsigned long long *histogram_boundaries = NULL;
 959
 960	struct dm_arg_set as, as_backup;
 961	const char *a;
 962	unsigned feature_args;
 963
 964	/*
 965	 * Input format:
 966	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
 967	 */
 968
 969	if (argc < 3)
 970		goto ret_einval;
 971
 972	as.argc = argc;
 973	as.argv = argv;
 974	dm_consume_args(&as, 1);
 975
 976	a = dm_shift_arg(&as);
 977	if (!strcmp(a, "-")) {
 978		start = 0;
 979		len = dm_get_size(md);
 980		if (!len)
 981			len = 1;
 982	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
 983		   start != (sector_t)start || len != (sector_t)len)
 984		goto ret_einval;
 985
 986	end = start + len;
 987	if (start >= end)
 988		goto ret_einval;
 989
 990	a = dm_shift_arg(&as);
 991	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
 992		if (!divisor)
 993			return -EINVAL;
 994		step = end - start;
 995		if (do_div(step, divisor))
 996			step++;
 997		if (!step)
 998			step = 1;
 999	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
1000		   step != (sector_t)step || !step)
1001		goto ret_einval;
1002
1003	as_backup = as;
1004	a = dm_shift_arg(&as);
1005	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
1006		while (feature_args--) {
1007			a = dm_shift_arg(&as);
1008			if (!a)
1009				goto ret_einval;
1010			if (!strcasecmp(a, "precise_timestamps"))
1011				stat_flags |= STAT_PRECISE_TIMESTAMPS;
1012			else if (!strncasecmp(a, "histogram:", 10)) {
1013				if (n_histogram_entries)
1014					goto ret_einval;
1015				if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
1016					goto ret;
1017			} else
1018				goto ret_einval;
1019		}
1020	} else {
1021		as = as_backup;
1022	}
1023
1024	program_id = "-";
1025	aux_data = "-";
1026
1027	a = dm_shift_arg(&as);
1028	if (a)
1029		program_id = a;
1030
1031	a = dm_shift_arg(&as);
1032	if (a)
1033		aux_data = a;
1034
1035	if (as.argc)
1036		goto ret_einval;
1037
1038	/*
1039	 * If a buffer overflow happens after we created the region,
1040	 * it's too late (the userspace would retry with a larger
1041	 * buffer, but the region id that caused the overflow is already
1042	 * leaked).  So we must detect buffer overflow in advance.
1043	 */
1044	snprintf(result, maxlen, "%d", INT_MAX);
1045	if (dm_message_test_buffer_overflow(result, maxlen)) {
1046		r = 1;
1047		goto ret;
1048	}
1049
1050	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
1051			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
1052			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
1053	if (id < 0) {
1054		r = id;
1055		goto ret;
1056	}
1057
1058	snprintf(result, maxlen, "%d", id);
1059
1060	r = 1;
1061	goto ret;
1062
1063ret_einval:
1064	r = -EINVAL;
1065ret:
1066	kfree(histogram_boundaries);
1067	return r;
1068}
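
Putting the parser together: the first argument is a range ("-" means the whole device, otherwise <start>+<length> in sectors), the second a step size (or /<number_of_areas>), optionally a count of feature arguments followed by the features themselves, then program_id and aux_data. A few illustrative messages this code accepts (values are made up; a message reaches the kernel via something like "dmsetup message <device> 0 ..."):

#include <stdio.h>

int main(void)
{
	static const char *const examples[] = {
		"@stats_create - /100",		/* whole device, 100 areas */
		"@stats_create 0+1024 512",	/* sectors 0..1023, 512-sector step */
		"@stats_create - /10 1 precise_timestamps",
		"@stats_create - /10 2 precise_timestamps histogram:1000,10000 myprog myaux",
	};
	unsigned i;

	for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
		printf("%s\n", examples[i]);
	return 0;
}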
1069
1070static int message_stats_delete(struct mapped_device *md,
1071				unsigned argc, char **argv)
1072{
1073	int id;
1074	char dummy;
1075
1076	if (argc != 2)
1077		return -EINVAL;
1078
1079	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
1080		return -EINVAL;
1081
1082	return dm_stats_delete(dm_get_stats(md), id);
1083}
1084
1085static int message_stats_clear(struct mapped_device *md,
1086			       unsigned argc, char **argv)
1087{
1088	int id;
1089	char dummy;
1090
1091	if (argc != 2)
1092		return -EINVAL;
1093
1094	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
1095		return -EINVAL;
1096
1097	return dm_stats_clear(dm_get_stats(md), id);
1098}
1099
1100static int message_stats_list(struct mapped_device *md,
1101			      unsigned argc, char **argv,
1102			      char *result, unsigned maxlen)
1103{
1104	int r;
1105	const char *program = NULL;
1106
1107	if (argc < 1 || argc > 2)
1108		return -EINVAL;
1109
1110	if (argc > 1) {
1111		program = kstrdup(argv[1], GFP_KERNEL);
1112		if (!program)
1113			return -ENOMEM;
1114	}
1115
1116	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
1117
1118	kfree(program);
1119
1120	return r;
1121}
1122
1123static int message_stats_print(struct mapped_device *md,
1124			       unsigned argc, char **argv, bool clear,
1125			       char *result, unsigned maxlen)
1126{
1127	int id;
1128	char dummy;
1129	unsigned long idx_start = 0, idx_len = ULONG_MAX;
1130
1131	if (argc != 2 && argc != 4)
1132		return -EINVAL;
1133
1134	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
1135		return -EINVAL;
1136
1137	if (argc > 3) {
1138		if (strcmp(argv[2], "-") &&
1139		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
1140			return -EINVAL;
1141		if (strcmp(argv[3], "-") &&
1142		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
1143			return -EINVAL;
1144	}
1145
1146	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
1147			      result, maxlen);
1148}
1149
1150static int message_stats_set_aux(struct mapped_device *md,
1151				 unsigned argc, char **argv)
1152{
1153	int id;
1154	char dummy;
1155
1156	if (argc != 3)
1157		return -EINVAL;
1158
1159	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
1160		return -EINVAL;
1161
1162	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
1163}
1164
1165int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
1166		     char *result, unsigned maxlen)
1167{
1168	int r;
1169
1170	/* All messages here must start with '@' */
1171	if (!strcasecmp(argv[0], "@stats_create"))
1172		r = message_stats_create(md, argc, argv, result, maxlen);
1173	else if (!strcasecmp(argv[0], "@stats_delete"))
1174		r = message_stats_delete(md, argc, argv);
1175	else if (!strcasecmp(argv[0], "@stats_clear"))
1176		r = message_stats_clear(md, argc, argv);
1177	else if (!strcasecmp(argv[0], "@stats_list"))
1178		r = message_stats_list(md, argc, argv, result, maxlen);
1179	else if (!strcasecmp(argv[0], "@stats_print"))
1180		r = message_stats_print(md, argc, argv, false, result, maxlen);
1181	else if (!strcasecmp(argv[0], "@stats_print_clear"))
1182		r = message_stats_print(md, argc, argv, true, result, maxlen);
1183	else if (!strcasecmp(argv[0], "@stats_set_aux"))
1184		r = message_stats_set_aux(md, argc, argv);
1185	else
1186		return 2; /* this wasn't a stats message */
1187
1188	if (r == -EINVAL)
1189		DMWARN("Invalid parameters for message %s", argv[0]);
1190
1191	return r;
1192}
1193
1194int __init dm_statistics_init(void)
1195{
1196	shared_memory_amount = 0;
1197	dm_stat_need_rcu_barrier = 0;
1198	return 0;
1199}
1200
1201void dm_statistics_exit(void)
1202{
1203	if (dm_stat_need_rcu_barrier)
1204		rcu_barrier();
1205	if (WARN_ON(shared_memory_amount))
1206		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
1207}
1208
1209module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
1210MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
drivers/md/dm-stats.c, v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/errno.h>
   3#include <linux/numa.h>
   4#include <linux/slab.h>
   5#include <linux/rculist.h>
   6#include <linux/threads.h>
   7#include <linux/preempt.h>
   8#include <linux/irqflags.h>
   9#include <linux/vmalloc.h>
  10#include <linux/mm.h>
  11#include <linux/module.h>
  12#include <linux/device-mapper.h>
  13
  14#include "dm-core.h"
  15#include "dm-stats.h"
  16
  17#define DM_MSG_PREFIX "stats"
  18
  19static int dm_stat_need_rcu_barrier;
  20
  21/*
  22 * Using 64-bit values to avoid overflow (which is a
  23 * problem that block/genhd.c's IO accounting has).
  24 */
  25struct dm_stat_percpu {
  26	unsigned long long sectors[2];
  27	unsigned long long ios[2];
  28	unsigned long long merges[2];
  29	unsigned long long ticks[2];
  30	unsigned long long io_ticks[2];
  31	unsigned long long io_ticks_total;
  32	unsigned long long time_in_queue;
  33	unsigned long long *histogram;
  34};
  35
  36struct dm_stat_shared {
  37	atomic_t in_flight[2];
  38	unsigned long long stamp;
  39	struct dm_stat_percpu tmp;
  40};
  41
  42struct dm_stat {
  43	struct list_head list_entry;
  44	int id;
  45	unsigned stat_flags;
  46	size_t n_entries;
  47	sector_t start;
  48	sector_t end;
  49	sector_t step;
  50	unsigned n_histogram_entries;
  51	unsigned long long *histogram_boundaries;
  52	const char *program_id;
  53	const char *aux_data;
  54	struct rcu_head rcu_head;
  55	size_t shared_alloc_size;
  56	size_t percpu_alloc_size;
  57	size_t histogram_alloc_size;
  58	struct dm_stat_percpu *stat_percpu[NR_CPUS];
  59	struct dm_stat_shared stat_shared[0];
  60};
  61
  62#define STAT_PRECISE_TIMESTAMPS		1
  63
  64struct dm_stats_last_position {
  65	sector_t last_sector;
  66	unsigned last_rw;
  67};
  68
  69/*
  70 * A typo on the command line could possibly make the kernel run out of memory
  71 * and crash. To prevent the crash we account all used memory. We fail if we
  72 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
  73 */
  74#define DM_STATS_MEMORY_FACTOR		4
  75#define DM_STATS_VMALLOC_FACTOR		2
  76
  77static DEFINE_SPINLOCK(shared_memory_lock);
  78
  79static unsigned long shared_memory_amount;
  80
  81static bool __check_shared_memory(size_t alloc_size)
  82{
  83	size_t a;
  84
  85	a = shared_memory_amount + alloc_size;
  86	if (a < shared_memory_amount)
  87		return false;
  88	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
  89		return false;
  90#ifdef CONFIG_MMU
  91	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
  92		return false;
  93#endif
  94	return true;
  95}
  96
  97static bool check_shared_memory(size_t alloc_size)
  98{
  99	bool ret;
 100
 101	spin_lock_irq(&shared_memory_lock);
 102
 103	ret = __check_shared_memory(alloc_size);
 104
 105	spin_unlock_irq(&shared_memory_lock);
 106
 107	return ret;
 108}
 109
 110static bool claim_shared_memory(size_t alloc_size)
 111{
 112	spin_lock_irq(&shared_memory_lock);
 113
 114	if (!__check_shared_memory(alloc_size)) {
 115		spin_unlock_irq(&shared_memory_lock);
 116		return false;
 117	}
 118
 119	shared_memory_amount += alloc_size;
 120
 121	spin_unlock_irq(&shared_memory_lock);
 122
 123	return true;
 124}
 125
 126static void free_shared_memory(size_t alloc_size)
 127{
 128	unsigned long flags;
 129
 130	spin_lock_irqsave(&shared_memory_lock, flags);
 131
 132	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
 133		spin_unlock_irqrestore(&shared_memory_lock, flags);
 134		DMCRIT("Memory usage accounting bug.");
 135		return;
 136	}
 137
 138	shared_memory_amount -= alloc_size;
 139
 140	spin_unlock_irqrestore(&shared_memory_lock, flags);
 141}
 142
 143static void *dm_kvzalloc(size_t alloc_size, int node)
 144{
 145	void *p;
 146
 147	if (!claim_shared_memory(alloc_size))
 148		return NULL;
 149
  150	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
  151	if (p)
 152		return p;
 153
 154	free_shared_memory(alloc_size);
 155
 156	return NULL;
 157}
 158
 159static void dm_kvfree(void *ptr, size_t alloc_size)
 160{
 161	if (!ptr)
 162		return;
 163
 164	free_shared_memory(alloc_size);
 165
 166	kvfree(ptr);
 167}
 168
 169static void dm_stat_free(struct rcu_head *head)
 170{
 171	int cpu;
 172	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
 173
 174	kfree(s->histogram_boundaries);
 175	kfree(s->program_id);
 176	kfree(s->aux_data);
 177	for_each_possible_cpu(cpu) {
 178		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
 179		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
 180	}
 181	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
 182	dm_kvfree(s, s->shared_alloc_size);
 183}
 184
 185static int dm_stat_in_flight(struct dm_stat_shared *shared)
 186{
 187	return atomic_read(&shared->in_flight[READ]) +
 188	       atomic_read(&shared->in_flight[WRITE]);
 189}
 190
 191void dm_stats_init(struct dm_stats *stats)
 192{
 193	int cpu;
 194	struct dm_stats_last_position *last;
 195
 196	mutex_init(&stats->mutex);
 197	INIT_LIST_HEAD(&stats->list);
 198	stats->last = alloc_percpu(struct dm_stats_last_position);
 199	for_each_possible_cpu(cpu) {
 200		last = per_cpu_ptr(stats->last, cpu);
 201		last->last_sector = (sector_t)ULLONG_MAX;
 202		last->last_rw = UINT_MAX;
 203	}
 204}
 205
 206void dm_stats_cleanup(struct dm_stats *stats)
 207{
 208	size_t ni;
 209	struct dm_stat *s;
 210	struct dm_stat_shared *shared;
 211
 212	while (!list_empty(&stats->list)) {
 213		s = container_of(stats->list.next, struct dm_stat, list_entry);
 214		list_del(&s->list_entry);
 215		for (ni = 0; ni < s->n_entries; ni++) {
 216			shared = &s->stat_shared[ni];
 217			if (WARN_ON(dm_stat_in_flight(shared))) {
 218				DMCRIT("leaked in-flight counter at index %lu "
 219				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
 220				       (unsigned long)ni,
 221				       (unsigned long long)s->start,
 222				       (unsigned long long)s->end,
 223				       (unsigned long long)s->step,
 224				       atomic_read(&shared->in_flight[READ]),
 225				       atomic_read(&shared->in_flight[WRITE]));
 226			}
 227		}
 228		dm_stat_free(&s->rcu_head);
 229	}
 230	free_percpu(stats->last);
 231	mutex_destroy(&stats->mutex);
 232}
 233
 234static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 235			   sector_t step, unsigned stat_flags,
 236			   unsigned n_histogram_entries,
 237			   unsigned long long *histogram_boundaries,
 238			   const char *program_id, const char *aux_data,
 239			   void (*suspend_callback)(struct mapped_device *),
 240			   void (*resume_callback)(struct mapped_device *),
 241			   struct mapped_device *md)
 242{
 243	struct list_head *l;
 244	struct dm_stat *s, *tmp_s;
 245	sector_t n_entries;
 246	size_t ni;
 247	size_t shared_alloc_size;
 248	size_t percpu_alloc_size;
 249	size_t histogram_alloc_size;
 250	struct dm_stat_percpu *p;
 251	int cpu;
 252	int ret_id;
 253	int r;
 254
 255	if (end < start || !step)
 256		return -EINVAL;
 257
 258	n_entries = end - start;
 259	if (dm_sector_div64(n_entries, step))
 260		n_entries++;
 261
 262	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
 263		return -EOVERFLOW;
 264
 265	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
 266	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
 267		return -EOVERFLOW;
 268
 269	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
 270	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
 271		return -EOVERFLOW;
 272
 273	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
 274	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
 275		return -EOVERFLOW;
 276
 277	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
 278				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
 279		return -ENOMEM;
 280
 281	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
 282	if (!s)
 283		return -ENOMEM;
 284
 285	s->stat_flags = stat_flags;
 286	s->n_entries = n_entries;
 287	s->start = start;
 288	s->end = end;
 289	s->step = step;
 290	s->shared_alloc_size = shared_alloc_size;
 291	s->percpu_alloc_size = percpu_alloc_size;
 292	s->histogram_alloc_size = histogram_alloc_size;
 293
 294	s->n_histogram_entries = n_histogram_entries;
 295	s->histogram_boundaries = kmemdup(histogram_boundaries,
 296					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
 297	if (!s->histogram_boundaries) {
 298		r = -ENOMEM;
 299		goto out;
 300	}
 301
 302	s->program_id = kstrdup(program_id, GFP_KERNEL);
 303	if (!s->program_id) {
 304		r = -ENOMEM;
 305		goto out;
 306	}
 307	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
 308	if (!s->aux_data) {
 309		r = -ENOMEM;
 310		goto out;
 311	}
 312
 313	for (ni = 0; ni < n_entries; ni++) {
 314		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
 315		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
 316	}
 317
 318	if (s->n_histogram_entries) {
 319		unsigned long long *hi;
 320		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
 321		if (!hi) {
 322			r = -ENOMEM;
 323			goto out;
 324		}
 325		for (ni = 0; ni < n_entries; ni++) {
 326			s->stat_shared[ni].tmp.histogram = hi;
 327			hi += s->n_histogram_entries + 1;
 328		}
 329	}
 330
 331	for_each_possible_cpu(cpu) {
 332		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
 333		if (!p) {
 334			r = -ENOMEM;
 335			goto out;
 336		}
 337		s->stat_percpu[cpu] = p;
 338		if (s->n_histogram_entries) {
 339			unsigned long long *hi;
 340			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
 341			if (!hi) {
 342				r = -ENOMEM;
 343				goto out;
 344			}
 345			for (ni = 0; ni < n_entries; ni++) {
 346				p[ni].histogram = hi;
 347				hi += s->n_histogram_entries + 1;
 348			}
 349		}
 350	}
 351
 352	/*
 353	 * Suspend/resume to make sure there is no i/o in flight,
 354	 * so that newly created statistics will be exact.
 355	 *
 356	 * (note: we couldn't suspend earlier because we must not
 357	 * allocate memory while suspended)
 358	 */
 359	suspend_callback(md);
 360
 361	mutex_lock(&stats->mutex);
 362	s->id = 0;
 363	list_for_each(l, &stats->list) {
 364		tmp_s = container_of(l, struct dm_stat, list_entry);
 365		if (WARN_ON(tmp_s->id < s->id)) {
 366			r = -EINVAL;
 367			goto out_unlock_resume;
 368		}
 369		if (tmp_s->id > s->id)
 370			break;
 371		if (unlikely(s->id == INT_MAX)) {
 372			r = -ENFILE;
 373			goto out_unlock_resume;
 374		}
 375		s->id++;
 376	}
 377	ret_id = s->id;
 378	list_add_tail_rcu(&s->list_entry, l);
 379	mutex_unlock(&stats->mutex);
 380
 381	resume_callback(md);
 382
 383	return ret_id;
 384
 385out_unlock_resume:
 386	mutex_unlock(&stats->mutex);
 387	resume_callback(md);
 388out:
 389	dm_stat_free(&s->rcu_head);
 390	return r;
 391}
 392
 393static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
 394{
 395	struct dm_stat *s;
 396
 397	list_for_each_entry(s, &stats->list, list_entry) {
 398		if (s->id > id)
 399			break;
 400		if (s->id == id)
 401			return s;
 402	}
 403
 404	return NULL;
 405}
 406
 407static int dm_stats_delete(struct dm_stats *stats, int id)
 408{
 409	struct dm_stat *s;
 410	int cpu;
 411
 412	mutex_lock(&stats->mutex);
 413
 414	s = __dm_stats_find(stats, id);
 415	if (!s) {
 416		mutex_unlock(&stats->mutex);
 417		return -ENOENT;
 418	}
 419
 420	list_del_rcu(&s->list_entry);
 421	mutex_unlock(&stats->mutex);
 422
 423	/*
 424	 * vfree can't be called from RCU callback
 425	 */
 426	for_each_possible_cpu(cpu)
 427		if (is_vmalloc_addr(s->stat_percpu) ||
 428		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
 429			goto do_sync_free;
 430	if (is_vmalloc_addr(s) ||
 431	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
 432do_sync_free:
 433		synchronize_rcu_expedited();
 434		dm_stat_free(&s->rcu_head);
 435	} else {
 436		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
 437		call_rcu(&s->rcu_head, dm_stat_free);
 438	}
 439	return 0;
 440}
 441
 442static int dm_stats_list(struct dm_stats *stats, const char *program,
 443			 char *result, unsigned maxlen)
 444{
 445	struct dm_stat *s;
 446	sector_t len;
 447	unsigned sz = 0;
 448
 449	/*
 450	 * Output format:
 451	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
 452	 */
 453
 454	mutex_lock(&stats->mutex);
 455	list_for_each_entry(s, &stats->list, list_entry) {
 456		if (!program || !strcmp(program, s->program_id)) {
 457			len = s->end - s->start;
 458			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
 459				(unsigned long long)s->start,
 460				(unsigned long long)len,
 461				(unsigned long long)s->step,
 462				s->program_id,
 463				s->aux_data);
 464			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
 465				DMEMIT(" precise_timestamps");
 466			if (s->n_histogram_entries) {
 467				unsigned i;
 468				DMEMIT(" histogram:");
 469				for (i = 0; i < s->n_histogram_entries; i++) {
 470					if (i)
 471						DMEMIT(",");
 472					DMEMIT("%llu", s->histogram_boundaries[i]);
 473				}
 474			}
 475			DMEMIT("\n");
 476		}
 477	}
 478	mutex_unlock(&stats->mutex);
 479
 480	return 1;
 481}
 482
 483static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
 484			  struct dm_stat_percpu *p)
 485{
 486	/*
 487	 * This is racy, but so is part_round_stats_single.
 488	 */
 489	unsigned long long now, difference;
 490	unsigned in_flight_read, in_flight_write;
 491
 492	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
 493		now = jiffies;
 494	else
 495		now = ktime_to_ns(ktime_get());
 496
 497	difference = now - shared->stamp;
 498	if (!difference)
 499		return;
 500
 501	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
 502	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
 503	if (in_flight_read)
 504		p->io_ticks[READ] += difference;
 505	if (in_flight_write)
 506		p->io_ticks[WRITE] += difference;
 507	if (in_flight_read + in_flight_write) {
 508		p->io_ticks_total += difference;
 509		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
 510	}
 511	shared->stamp = now;
 512}
 513
 514static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
 515			      int idx, sector_t len,
 516			      struct dm_stats_aux *stats_aux, bool end,
 517			      unsigned long duration_jiffies)
  518{
  519	struct dm_stat_shared *shared = &s->stat_shared[entry];
 520	struct dm_stat_percpu *p;
 521
 522	/*
 523	 * For strict correctness we should use local_irq_save/restore
 524	 * instead of preempt_disable/enable.
 525	 *
 526	 * preempt_disable/enable is racy if the driver finishes bios
 527	 * from non-interrupt context as well as from interrupt context
  528 * or from two or more different interrupts.
 529	 *
 530	 * On 64-bit architectures the race only results in not counting some
 531	 * events, so it is acceptable.  On 32-bit architectures the race could
 532	 * cause the counter going off by 2^32, so we need to do proper locking
 533	 * there.
 534	 *
 535	 * part_stat_lock()/part_stat_unlock() have this race too.
 536	 */
 537#if BITS_PER_LONG == 32
 538	unsigned long flags;
 539	local_irq_save(flags);
 540#else
 541	preempt_disable();
 542#endif
 543	p = &s->stat_percpu[smp_processor_id()][entry];
 544
 545	if (!end) {
 546		dm_stat_round(s, shared, p);
 547		atomic_inc(&shared->in_flight[idx]);
 548	} else {
 549		unsigned long long duration;
 550		dm_stat_round(s, shared, p);
 551		atomic_dec(&shared->in_flight[idx]);
 552		p->sectors[idx] += len;
 553		p->ios[idx] += 1;
 554		p->merges[idx] += stats_aux->merged;
 555		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
 556			p->ticks[idx] += duration_jiffies;
 557			duration = jiffies_to_msecs(duration_jiffies);
 558		} else {
 559			p->ticks[idx] += stats_aux->duration_ns;
 560			duration = stats_aux->duration_ns;
 561		}
 562		if (s->n_histogram_entries) {
 563			unsigned lo = 0, hi = s->n_histogram_entries + 1;
 564			while (lo + 1 < hi) {
 565				unsigned mid = (lo + hi) / 2;
 566				if (s->histogram_boundaries[mid - 1] > duration) {
 567					hi = mid;
 568				} else {
 569					lo = mid;
 570				}
 571
 572			}
 573			p->histogram[lo]++;
 574		}
 575	}
 576
 577#if BITS_PER_LONG == 32
 578	local_irq_restore(flags);
 579#else
 580	preempt_enable();
 581#endif
 582}
 583
 584static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
 585			  sector_t bi_sector, sector_t end_sector,
 586			  bool end, unsigned long duration_jiffies,
 587			  struct dm_stats_aux *stats_aux)
 588{
 589	sector_t rel_sector, offset, todo, fragment_len;
 590	size_t entry;
 591
 592	if (end_sector <= s->start || bi_sector >= s->end)
 593		return;
 594	if (unlikely(bi_sector < s->start)) {
 595		rel_sector = 0;
 596		todo = end_sector - s->start;
 597	} else {
 598		rel_sector = bi_sector - s->start;
 599		todo = end_sector - bi_sector;
 600	}
 601	if (unlikely(end_sector > s->end))
 602		todo -= (end_sector - s->end);
 603
 604	offset = dm_sector_div64(rel_sector, s->step);
 605	entry = rel_sector;
 606	do {
 607		if (WARN_ON_ONCE(entry >= s->n_entries)) {
 608			DMCRIT("Invalid area access in region id %d", s->id);
 609			return;
 610		}
 611		fragment_len = todo;
 612		if (fragment_len > s->step - offset)
 613			fragment_len = s->step - offset;
 614		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
 615				  stats_aux, end, duration_jiffies);
 616		todo -= fragment_len;
 617		entry++;
 618		offset = 0;
 619	} while (unlikely(todo != 0));
 620}
 621
 622void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 623			 sector_t bi_sector, unsigned bi_sectors, bool end,
 624			 unsigned long duration_jiffies,
 625			 struct dm_stats_aux *stats_aux)
 626{
 627	struct dm_stat *s;
 628	sector_t end_sector;
 629	struct dm_stats_last_position *last;
 630	bool got_precise_time;
 631
 632	if (unlikely(!bi_sectors))
 633		return;
 634
 635	end_sector = bi_sector + bi_sectors;
 636
 637	if (!end) {
 638		/*
 639		 * A race condition can at worst result in the merged flag being
 640		 * misrepresented, so we don't have to disable preemption here.
 641		 */
 642		last = raw_cpu_ptr(stats->last);
 643		stats_aux->merged =
  644			(bi_sector == READ_ONCE(last->last_sector) &&
  645			 ((bi_rw == WRITE) ==
  646			  (READ_ONCE(last->last_rw) == WRITE)));
  647
 648		WRITE_ONCE(last->last_sector, end_sector);
 649		WRITE_ONCE(last->last_rw, bi_rw);
 650	}
 651
 652	rcu_read_lock();
 653
 654	got_precise_time = false;
 655	list_for_each_entry_rcu(s, &stats->list, list_entry) {
 656		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
 657			if (!end)
 658				stats_aux->duration_ns = ktime_to_ns(ktime_get());
 659			else
 660				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
 661			got_precise_time = true;
 662		}
 663		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
 664	}
 665
 666	rcu_read_unlock();
 667}
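
Relative to the v4.6 copy above, this version uses READ_ONCE/WRITE_ONCE in place of ACCESS_ONCE: each forces the compiler to emit exactly one untorn load or store, which is all the deliberately racy last_sector/last_rw merge heuristic needs. A userspace approximation built on a volatile cast (this mirrors the classic definition; the real kernel macros have grown extra type checking):

#include <stdio.h>

/* Userspace stand-ins for READ_ONCE/WRITE_ONCE: the volatile access stops
 * the compiler from caching, duplicating or eliding the load/store (and
 * machine-word-sized accesses do not tear). Requires GCC/Clang __typeof__. */
#define READ_ONCE_U(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE_U(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long last_sector;

int main(void)
{
	WRITE_ONCE_U(last_sector, 12345);
	printf("%lu\n", READ_ONCE_U(last_sector));
	return 0;
}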
 668
 669static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
 670						   struct dm_stat *s, size_t x)
 671{
 672	int cpu;
 673	struct dm_stat_percpu *p;
 674
 675	local_irq_disable();
 676	p = &s->stat_percpu[smp_processor_id()][x];
 677	dm_stat_round(s, shared, p);
 678	local_irq_enable();
 679
 680	shared->tmp.sectors[READ] = 0;
 681	shared->tmp.sectors[WRITE] = 0;
 682	shared->tmp.ios[READ] = 0;
 683	shared->tmp.ios[WRITE] = 0;
 684	shared->tmp.merges[READ] = 0;
 685	shared->tmp.merges[WRITE] = 0;
 686	shared->tmp.ticks[READ] = 0;
 687	shared->tmp.ticks[WRITE] = 0;
 688	shared->tmp.io_ticks[READ] = 0;
 689	shared->tmp.io_ticks[WRITE] = 0;
 690	shared->tmp.io_ticks_total = 0;
 691	shared->tmp.time_in_queue = 0;
 692
 693	if (s->n_histogram_entries)
 694		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));
 695
 696	for_each_possible_cpu(cpu) {
 697		p = &s->stat_percpu[cpu][x];
 698		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
 699		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
 700		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
 701		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
 702		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
 703		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
 704		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
 705		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
 706		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
 707		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
 708		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
 709		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
 710		if (s->n_histogram_entries) {
 711			unsigned i;
 712			for (i = 0; i < s->n_histogram_entries + 1; i++)
 713				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
 714		}
 715	}
 716}
 717
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
	}
}

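/*
 * Reset all counters of the region 'id'.  Returns 1 on success (the
 * @stats_clear message produces no output) or -ENOENT if no region
 * with that id exists.
 */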
static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msecs(), but works for 64-bit values: the
 * value is converted in 22-bit chunks so that the intermediate
 * arithmetic cannot overflow.  Regions created with the
 * precise_timestamps feature store nanoseconds directly, so their
 * values are returned unchanged.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

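/*
 * Format the counters of areas [idx_start, idx_start + idx_len) of
 * region 'id', one "<start_sector>+<length> counters" line per area.
 * The first 11 counters have the same meaning as in
 * /sys/block/<device>/stat; they are followed by the read and write
 * io_ticks and, if configured, the latency histogram.  If 'clear' is
 * set, the counters just reported (still held in shared->tmp) are
 * subtracted afterwards, so no events are lost between printing and
 * clearing.
 */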
static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
			}
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

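/*
 * Replace the auxiliary data string of region 'id'.  The new string is
 * duplicated before the old one is freed, so the region always carries
 * valid aux_data.
 */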
static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

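/*
 * Parse a comma-separated, strictly increasing list of histogram
 * boundaries such as "10,25,50".  N boundaries define N + 1 buckets,
 * which is why the counter arrays in this file are sized
 * n_histogram_entries + 1.  On failure the caller must free
 * *histogram_boundaries.
 */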
static int parse_histogram(const char *h, unsigned *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;
		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}

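/*
 * Message handler for @stats_create:
 *   <range> <step> [<#feature_args> <feature_args>] [<program_id> [<aux_data>]]
 *
 * An illustrative invocation (device name arbitrary; see
 * Documentation/device-mapper/statistics.txt for the full syntax):
 *
 *   dmsetup message vol 0 @stats_create - /100
 *
 * creates a region spanning the whole device, divided into 100 areas,
 * and replies with the id of the new region.
 */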
static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;
	unsigned stat_flags = 0;

	unsigned n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;

	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned feature_args;

	/*
	 * Input format:
	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
	 */

	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			goto ret_einval;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
				if (r)
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (userspace would retry with a larger buffer,
	 * but the region id that caused the overflow has already
	 * leaked).  So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}

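/*
 * Message handler for @stats_delete <region_id>.
 */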
static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

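/*
 * Message handler for @stats_clear <region_id>.
 */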
static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

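/*
 * Message handler for @stats_list [<program_id>]: list every region,
 * or only the regions whose program_id matches the optional argument.
 */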
static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

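/*
 * Message handler for @stats_print and @stats_print_clear:
 *   <region_id> [<idx_start> <idx_len>]
 * "-" may be passed for either optional argument to keep its default
 * (area 0 and "all remaining areas", respectively).
 */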
static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

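/*
 * Message handler for @stats_set_aux <region_id> <aux_data>.
 */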
static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

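/*
 * Dispatch a device-mapper message to the statistics handlers.
 * Returns 2 if argv[0] is not a statistics message, so the caller can
 * pass the message on to other handlers.
 */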
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");