   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
   4 *
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/blkdev.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/percpu.h>
  13#include <linux/init.h>
  14#include <linux/mutex.h>
  15#include <linux/slab.h>
  16#include <linux/debugfs.h>
  17#include <linux/export.h>
  18#include <linux/time.h>
  19#include <linux/uaccess.h>
  20#include <linux/list.h>
  21#include <linux/blk-cgroup.h>
  22
  23#include "../../block/blk.h"
  24
  25#include <trace/events/block.h>
  26
  27#include "trace_output.h"
  28
  29#ifdef CONFIG_BLK_DEV_IO_TRACE
  30
  31static unsigned int blktrace_seq __read_mostly = 1;
  32
  33static struct trace_array *blk_tr;
  34static bool blk_tracer_enabled __read_mostly;
  35
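/*
 * Every blk_trace that is actively tracing is kept on running_trace_list
 * so trace_note_tsk() can send a BLK_TN_PROCESS note for a new task to
 * each running trace; a raw spinlock is used because the list is walked
 * from the I/O submission path.
 */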
  36static LIST_HEAD(running_trace_list);
  37static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
  38
   39/* Select an alternative, minimalistic output instead of the original one */
  40#define TRACE_BLK_OPT_CLASSIC	0x1
  41#define TRACE_BLK_OPT_CGROUP	0x2
  42#define TRACE_BLK_OPT_CGNAME	0x4
  43
  44static struct tracer_opt blk_tracer_opts[] = {
   45	/* Disable the minimalistic output by default */
  46	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
  47#ifdef CONFIG_BLK_CGROUP
  48	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
  49	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
  50#endif
  51	{ }
  52};
  53
  54static struct tracer_flags blk_tracer_flags = {
  55	.val  = 0,
  56	.opts = blk_tracer_opts,
  57};
  58
  59/* Global reference count of probes */
  60static DEFINE_MUTEX(blk_probe_mutex);
  61static int blk_probes_ref;
  62
  63static void blk_register_tracepoints(void);
  64static void blk_unregister_tracepoints(void);
  65
  66/*
  67 * Send out a notify message.
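 * Notes are out-of-band records (process name, wall-clock timestamp,
 * free-form message) rather than I/O events; depending on which tracer
 * is active they go into the ftrace ring buffer or into the relay
 * channel that blktrace(8) reads.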
  68 */
  69static void trace_note(struct blk_trace *bt, pid_t pid, int action,
  70		       const void *data, size_t len, u64 cgid)
  71{
  72	struct blk_io_trace *t;
  73	struct ring_buffer_event *event = NULL;
  74	struct trace_buffer *buffer = NULL;
  75	unsigned int trace_ctx = 0;
  76	int cpu = smp_processor_id();
  77	bool blk_tracer = blk_tracer_enabled;
  78	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
  79
  80	if (blk_tracer) {
  81		buffer = blk_tr->array_buffer.buffer;
  82		trace_ctx = tracing_gen_ctx_flags(0);
  83		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
  84						  sizeof(*t) + len + cgid_len,
  85						  trace_ctx);
  86		if (!event)
  87			return;
  88		t = ring_buffer_event_data(event);
  89		goto record_it;
  90	}
  91
  92	if (!bt->rchan)
  93		return;
  94
  95	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
  96	if (t) {
  97		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
  98		t->time = ktime_to_ns(ktime_get());
  99record_it:
 100		t->device = bt->dev;
 101		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
 102		t->pid = pid;
 103		t->cpu = cpu;
 104		t->pdu_len = len + cgid_len;
 105		if (cgid_len)
 106			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 107		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 108
 109		if (blk_tracer)
 110			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 111	}
 112}
 113
 114/*
 115 * Send out a notify for this process, if we haven't done so since a trace
 116 * started
 117 */
 118static void trace_note_tsk(struct task_struct *tsk)
 119{
 120	unsigned long flags;
 121	struct blk_trace *bt;
 122
 123	tsk->btrace_seq = blktrace_seq;
 124	raw_spin_lock_irqsave(&running_trace_lock, flags);
 125	list_for_each_entry(bt, &running_trace_list, running_list) {
 126		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 127			   sizeof(tsk->comm), 0);
 128	}
 129	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 130}
 131
 132static void trace_note_time(struct blk_trace *bt)
 133{
 134	struct timespec64 now;
 135	unsigned long flags;
 136	u32 words[2];
 137
 138	/* need to check user space to see if this breaks in y2038 or y2106 */
 139	ktime_get_real_ts64(&now);
 140	words[0] = (u32)now.tv_sec;
 141	words[1] = now.tv_nsec;
 142
 143	local_irq_save(flags);
 144	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
 145	local_irq_restore(flags);
 146}
 147
 148void __blk_trace_note_message(struct blk_trace *bt,
 149		struct cgroup_subsys_state *css, const char *fmt, ...)
 150{
 151	int n;
 152	va_list args;
 153	unsigned long flags;
 154	char *buf;
 155	u64 cgid = 0;
 156
 157	if (unlikely(bt->trace_state != Blktrace_running &&
 158		     !blk_tracer_enabled))
 159		return;
 160
 161	/*
 162	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
 163	 * message to the trace.
 164	 */
 165	if (!(bt->act_mask & BLK_TC_NOTIFY))
 166		return;
 167
 168	local_irq_save(flags);
 169	buf = this_cpu_ptr(bt->msg_data);
 170	va_start(args, fmt);
 171	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 172	va_end(args);
 173
 174#ifdef CONFIG_BLK_CGROUP
 175	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 176		cgid = cgroup_id(css->cgroup);
 177	else
 178		cgid = 1;
 179#endif
 180	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
 181	local_irq_restore(flags);
 182}
 183EXPORT_SYMBOL_GPL(__blk_trace_note_message);
 184
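/*
 * Return 1 when an event must be filtered out: its action class is not
 * in the user's act_mask, the sector lies outside the traced LBA range,
 * or a pid filter is set and does not match.
 */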
 185static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 186			 pid_t pid)
 187{
 188	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
 189		return 1;
 190	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
 191		return 1;
 192	if (bt->pid && pid != bt->pid)
 193		return 1;
 194
 195	return 0;
 196}
 197
 198/*
 199 * Data direction bit lookup
 200 */
 201static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 202				 BLK_TC_ACT(BLK_TC_WRITE) };
 203
 204#define BLK_TC_RAHEAD		BLK_TC_AHEAD
 205#define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 206
 207/* The ilog2() calls fall out because they're constant */
 208#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) <<	\
 209	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
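/*
 * Example: MASK_TC_BIT(opf, SYNC) moves the REQ_SYNC bit from its
 * __REQ_SYNC position straight to the BLK_TC_SYNC position in the
 * high (action class) half of 'what', entirely at compile time.
 */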
 210
 211/*
 212 * The worker for the various blk_add_trace*() types. Fills out a
 213 * blk_io_trace structure and places it in a per-cpu subbuffer.
 214 */
 215static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 216			    const blk_opf_t opf, u32 what, int error,
 217			    int pdu_len, void *pdu_data, u64 cgid)
 218{
 219	struct task_struct *tsk = current;
 220	struct ring_buffer_event *event = NULL;
 221	struct trace_buffer *buffer = NULL;
 222	struct blk_io_trace *t;
 223	unsigned long flags = 0;
 224	unsigned long *sequence;
 225	unsigned int trace_ctx = 0;
 226	pid_t pid;
 227	int cpu;
 228	bool blk_tracer = blk_tracer_enabled;
 229	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 230	const enum req_op op = opf & REQ_OP_MASK;
 231
 232	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 233		return;
 234
 235	what |= ddir_act[op_is_write(op) ? WRITE : READ];
 236	what |= MASK_TC_BIT(opf, SYNC);
 237	what |= MASK_TC_BIT(opf, RAHEAD);
 238	what |= MASK_TC_BIT(opf, META);
 239	what |= MASK_TC_BIT(opf, PREFLUSH);
 240	what |= MASK_TC_BIT(opf, FUA);
 241	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 242		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 243	if (op == REQ_OP_FLUSH)
 244		what |= BLK_TC_ACT(BLK_TC_FLUSH);
 245	if (cgid)
 246		what |= __BLK_TA_CGROUP;
 247
 248	pid = tsk->pid;
 249	if (act_log_check(bt, what, sector, pid))
 250		return;
 251	cpu = raw_smp_processor_id();
 252
 253	if (blk_tracer) {
 254		tracing_record_cmdline(current);
 255
 256		buffer = blk_tr->array_buffer.buffer;
 257		trace_ctx = tracing_gen_ctx_flags(0);
 258		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 259						  sizeof(*t) + pdu_len + cgid_len,
 260						  trace_ctx);
 261		if (!event)
 262			return;
 263		t = ring_buffer_event_data(event);
 264		goto record_it;
 265	}
 266
 267	if (unlikely(tsk->btrace_seq != blktrace_seq))
 268		trace_note_tsk(tsk);
 269
 270	/*
 271	 * A word about the locking here - we disable interrupts to reserve
 272	 * some space in the relay per-cpu buffer, to prevent an irq
 273	 * from coming in and stepping on our toes.
 274	 */
 275	local_irq_save(flags);
 276	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
 277	if (t) {
 278		sequence = per_cpu_ptr(bt->sequence, cpu);
 279
 280		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
 281		t->sequence = ++(*sequence);
 282		t->time = ktime_to_ns(ktime_get());
 283record_it:
 284		/*
 285		 * These two are not needed in ftrace as they are in the
 286		 * generic trace_entry, filled by tracing_generic_entry_update,
 287		 * but for the trace_event->bin() synthesizer benefit we do it
 288		 * here too.
 289		 */
 290		t->cpu = cpu;
 291		t->pid = pid;
 292
 293		t->sector = sector;
 294		t->bytes = bytes;
 295		t->action = what;
 296		t->device = bt->dev;
 297		t->error = error;
 298		t->pdu_len = pdu_len + cgid_len;
 299
 300		if (cgid_len)
 301			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 302		if (pdu_len)
 303			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 304
 305		if (blk_tracer) {
 306			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 307			return;
 308		}
 309	}
 310
 311	local_irq_restore(flags);
 312}
 313
 314static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
 315{
 316	relay_close(bt->rchan);
 317
 318	/*
 319	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
 320	 * under 'q->debugfs_dir', thus lookup and remove them.
 321	 */
 322	if (!bt->dir) {
 323		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
 324		debugfs_lookup_and_remove("msg", q->debugfs_dir);
 325	} else {
 326		debugfs_remove(bt->dir);
 327	}
 328	free_percpu(bt->sequence);
 329	free_percpu(bt->msg_data);
 330	kfree(bt);
 331}
 332
 333static void get_probe_ref(void)
 334{
 335	mutex_lock(&blk_probe_mutex);
 336	if (++blk_probes_ref == 1)
 337		blk_register_tracepoints();
 338	mutex_unlock(&blk_probe_mutex);
 339}
 340
 341static void put_probe_ref(void)
 342{
 343	mutex_lock(&blk_probe_mutex);
 344	if (!--blk_probes_ref)
 345		blk_unregister_tracepoints();
 346	mutex_unlock(&blk_probe_mutex);
 347}
 348
 349static int blk_trace_start(struct blk_trace *bt)
 350{
 351	if (bt->trace_state != Blktrace_setup &&
 352	    bt->trace_state != Blktrace_stopped)
 353		return -EINVAL;
 354
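	/*
	 * Bumping blktrace_seq makes every task's cached btrace_seq stale,
	 * so __blk_add_trace() re-emits a BLK_TN_PROCESS note the next time
	 * each task submits I/O; the barrier below is presumably meant to
	 * order the bump against the switch to Blktrace_running.
	 */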
 355	blktrace_seq++;
 356	smp_mb();
 357	bt->trace_state = Blktrace_running;
 358	raw_spin_lock_irq(&running_trace_lock);
 359	list_add(&bt->running_list, &running_trace_list);
 360	raw_spin_unlock_irq(&running_trace_lock);
 361	trace_note_time(bt);
 362
 363	return 0;
 364}
 365
 366static int blk_trace_stop(struct blk_trace *bt)
 367{
 368	if (bt->trace_state != Blktrace_running)
 369		return -EINVAL;
 370
 371	bt->trace_state = Blktrace_stopped;
 372	raw_spin_lock_irq(&running_trace_lock);
 373	list_del_init(&bt->running_list);
 374	raw_spin_unlock_irq(&running_trace_lock);
 375	relay_flush(bt->rchan);
 376
 377	return 0;
 378}
 379
 380static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
 381{
 382	blk_trace_stop(bt);
 383	synchronize_rcu();
 384	blk_trace_free(q, bt);
 385	put_probe_ref();
 386}
 387
 388static int __blk_trace_remove(struct request_queue *q)
 389{
 390	struct blk_trace *bt;
 391
 392	bt = rcu_replace_pointer(q->blk_trace, NULL,
 393				 lockdep_is_held(&q->debugfs_mutex));
 394	if (!bt)
 395		return -EINVAL;
 396
 397	blk_trace_cleanup(q, bt);
 398
 399	return 0;
 400}
 401
 402int blk_trace_remove(struct request_queue *q)
 403{
 404	int ret;
 405
 406	mutex_lock(&q->debugfs_mutex);
 407	ret = __blk_trace_remove(q);
 408	mutex_unlock(&q->debugfs_mutex);
 409
 410	return ret;
 411}
 412EXPORT_SYMBOL_GPL(blk_trace_remove);
 413
 414static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 415				size_t count, loff_t *ppos)
 416{
 417	struct blk_trace *bt = filp->private_data;
 418	char buf[16];
 419
 420	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
 421
 422	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 423}
 424
 425static const struct file_operations blk_dropped_fops = {
 426	.owner =	THIS_MODULE,
 427	.open =		simple_open,
 428	.read =		blk_dropped_read,
 429	.llseek =	default_llseek,
 430};
 431
 432static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 433				size_t count, loff_t *ppos)
 434{
 435	char *msg;
 436	struct blk_trace *bt;
 437
 438	if (count >= BLK_TN_MAX_MSG)
 439		return -EINVAL;
 440
 441	msg = memdup_user_nul(buffer, count);
 442	if (IS_ERR(msg))
 443		return PTR_ERR(msg);
 444
 445	bt = filp->private_data;
 446	__blk_trace_note_message(bt, NULL, "%s", msg);
 447	kfree(msg);
 448
 449	return count;
 450}
 451
 452static const struct file_operations blk_msg_fops = {
 453	.owner =	THIS_MODULE,
 454	.open =		simple_open,
 455	.write =	blk_msg_write,
 456	.llseek =	noop_llseek,
 457};
 458
 459/*
 460 * Keep track of how many times we encountered a full subbuffer, to aid
 461 * the user space app in telling how many lost events there were.
 462 */
 463static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 464				     void *prev_subbuf, size_t prev_padding)
 465{
 466	struct blk_trace *bt;
 467
 468	if (!relay_buf_full(buf))
 469		return 1;
 470
 471	bt = buf->chan->private_data;
 472	atomic_inc(&bt->dropped);
 473	return 0;
 474}
 475
 476static int blk_remove_buf_file_callback(struct dentry *dentry)
 477{
 478	debugfs_remove(dentry);
 479
 480	return 0;
 481}
 482
 483static struct dentry *blk_create_buf_file_callback(const char *filename,
 484						   struct dentry *parent,
 485						   umode_t mode,
 486						   struct rchan_buf *buf,
 487						   int *is_global)
 488{
 489	return debugfs_create_file(filename, mode, parent, buf,
 490					&relay_file_operations);
 491}
 492
 493static const struct rchan_callbacks blk_relay_callbacks = {
 494	.subbuf_start		= blk_subbuf_start_callback,
 495	.create_buf_file	= blk_create_buf_file_callback,
 496	.remove_buf_file	= blk_remove_buf_file_callback,
 497};
 498
 499static void blk_trace_setup_lba(struct blk_trace *bt,
 500				struct block_device *bdev)
 501{
 502	if (bdev) {
 503		bt->start_lba = bdev->bd_start_sect;
 504		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
 505	} else {
 506		bt->start_lba = 0;
 507		bt->end_lba = -1ULL;
 508	}
 509}
 510
 511/*
 512 * Setup everything required to start tracing
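 * (a per-device debugfs directory with the "dropped" and "msg" control
 * files plus one relay "trace<cpu>" file per CPU for blktrace(8) to read)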
 513 */
 514static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 515			      struct block_device *bdev,
 516			      struct blk_user_trace_setup *buts)
 517{
 518	struct blk_trace *bt = NULL;
 519	struct dentry *dir = NULL;
 520	int ret;
 521
 522	lockdep_assert_held(&q->debugfs_mutex);
 523
 524	if (!buts->buf_size || !buts->buf_nr)
 525		return -EINVAL;
 526
 527	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE);
 528
 529	/*
 530	 * some device names have larger paths - convert the slashes
 531	 * to underscores for this to work as expected
 532	 */
 533	strreplace(buts->name, '/', '_');
 534
 535	/*
  536	 * bdev can be NULL, as with scsi-generic; this is as helpful as
  537	 * we can be.
 538	 */
 539	if (rcu_dereference_protected(q->blk_trace,
 540				      lockdep_is_held(&q->debugfs_mutex))) {
 541		pr_warn("Concurrent blktraces are not allowed on %s\n",
 542			buts->name);
 543		return -EBUSY;
 544	}
 545
 546	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
 547	if (!bt)
 548		return -ENOMEM;
 549
 550	ret = -ENOMEM;
 551	bt->sequence = alloc_percpu(unsigned long);
 552	if (!bt->sequence)
 553		goto err;
 554
 555	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 556	if (!bt->msg_data)
 557		goto err;
 558
 559	/*
 560	 * When tracing the whole disk reuse the existing debugfs directory
  561	 * created by the block layer on init. For partition block devices
  562	 * and scsi-generic block devices we create a temporary new debugfs
 563	 * directory that will be removed once the trace ends.
 564	 */
 565	if (bdev && !bdev_is_partition(bdev))
 566		dir = q->debugfs_dir;
 567	else
 568		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
 569
 570	/*
 571	 * As blktrace relies on debugfs for its interface the debugfs directory
 572	 * is required, contrary to the usual mantra of not checking for debugfs
 573	 * files or directories.
 574	 */
 575	if (IS_ERR_OR_NULL(dir)) {
 576		pr_warn("debugfs_dir not present for %s so skipping\n",
 577			buts->name);
 578		ret = -ENOENT;
 579		goto err;
 580	}
 581
 582	bt->dev = dev;
 583	atomic_set(&bt->dropped, 0);
 584	INIT_LIST_HEAD(&bt->running_list);
 585
 586	ret = -EIO;
 587	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
 588	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 589
 590	bt->rchan = relay_open("trace", dir, buts->buf_size,
 591				buts->buf_nr, &blk_relay_callbacks, bt);
 592	if (!bt->rchan)
 593		goto err;
 594
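	/* an all-zero mask from userspace means "trace every action class" */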
 595	bt->act_mask = buts->act_mask;
 596	if (!bt->act_mask)
 597		bt->act_mask = (u16) -1;
 598
 599	blk_trace_setup_lba(bt, bdev);
 600
 601	/* overwrite with user settings */
 602	if (buts->start_lba)
 603		bt->start_lba = buts->start_lba;
 604	if (buts->end_lba)
 605		bt->end_lba = buts->end_lba;
 606
 607	bt->pid = buts->pid;
 608	bt->trace_state = Blktrace_setup;
 609
 610	rcu_assign_pointer(q->blk_trace, bt);
 611	get_probe_ref();
 612
 613	ret = 0;
 614err:
 615	if (ret)
 616		blk_trace_free(q, bt);
 617	return ret;
 618}
 619
 620static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 621			     struct block_device *bdev, char __user *arg)
 622{
 623	struct blk_user_trace_setup buts;
 624	int ret;
 625
 626	ret = copy_from_user(&buts, arg, sizeof(buts));
 627	if (ret)
 628		return -EFAULT;
 629
 630	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 631	if (ret)
 632		return ret;
 633
 634	if (copy_to_user(arg, &buts, sizeof(buts))) {
 635		__blk_trace_remove(q);
 636		return -EFAULT;
 637	}
 638	return 0;
 639}
 640
 641int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 642		    struct block_device *bdev,
 643		    char __user *arg)
 644{
 645	int ret;
 646
 647	mutex_lock(&q->debugfs_mutex);
 648	ret = __blk_trace_setup(q, name, dev, bdev, arg);
 649	mutex_unlock(&q->debugfs_mutex);
 650
 651	return ret;
 652}
 653EXPORT_SYMBOL_GPL(blk_trace_setup);
 654
 655#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 656static int compat_blk_trace_setup(struct request_queue *q, char *name,
 657				  dev_t dev, struct block_device *bdev,
 658				  char __user *arg)
 659{
 660	struct blk_user_trace_setup buts;
 661	struct compat_blk_user_trace_setup cbuts;
 662	int ret;
 663
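	/*
	 * The 32-bit ABI struct differs only in field layout; translate it
	 * to the native struct and let do_blk_trace_setup() fill ->name,
	 * which is copied back to userspace below.
	 */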
 664	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 665		return -EFAULT;
 666
 667	buts = (struct blk_user_trace_setup) {
 668		.act_mask = cbuts.act_mask,
 669		.buf_size = cbuts.buf_size,
 670		.buf_nr = cbuts.buf_nr,
 671		.start_lba = cbuts.start_lba,
 672		.end_lba = cbuts.end_lba,
 673		.pid = cbuts.pid,
 674	};
 675
 676	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 677	if (ret)
 678		return ret;
 679
 680	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 681		__blk_trace_remove(q);
 682		return -EFAULT;
 683	}
 684
 685	return 0;
 686}
 687#endif
 688
 689static int __blk_trace_startstop(struct request_queue *q, int start)
 690{
 691	struct blk_trace *bt;
 692
 693	bt = rcu_dereference_protected(q->blk_trace,
 694				       lockdep_is_held(&q->debugfs_mutex));
 695	if (bt == NULL)
 696		return -EINVAL;
 697
 698	if (start)
 699		return blk_trace_start(bt);
 700	else
 701		return blk_trace_stop(bt);
 702}
 703
 704int blk_trace_startstop(struct request_queue *q, int start)
 705{
 706	int ret;
 707
 708	mutex_lock(&q->debugfs_mutex);
 709	ret = __blk_trace_startstop(q, start);
 710	mutex_unlock(&q->debugfs_mutex);
 711
 712	return ret;
 713}
 714EXPORT_SYMBOL_GPL(blk_trace_startstop);
 715
 716/*
 717 * When reading or writing the blktrace sysfs files, the references to the
 718 * opened sysfs or device files should prevent the underlying block device
 719 * from being removed. So no further delete protection is really needed.
 720 */
 721
 722/**
 723 * blk_trace_ioctl - handle the ioctls associated with tracing
 724 * @bdev:	the block device
 725 * @cmd:	the ioctl cmd
 726 * @arg:	the argument data, if any
 727 *
 728 **/
 729int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 730{
 731	struct request_queue *q = bdev_get_queue(bdev);
 732	int ret, start = 0;
 733	char b[BDEVNAME_SIZE];
 734
 735	mutex_lock(&q->debugfs_mutex);
 736
 737	switch (cmd) {
 738	case BLKTRACESETUP:
 739		snprintf(b, sizeof(b), "%pg", bdev);
 740		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 741		break;
 742#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 743	case BLKTRACESETUP32:
 744		snprintf(b, sizeof(b), "%pg", bdev);
 745		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 746		break;
 747#endif
 748	case BLKTRACESTART:
 749		start = 1;
 750		fallthrough;
 751	case BLKTRACESTOP:
 752		ret = __blk_trace_startstop(q, start);
 753		break;
 754	case BLKTRACETEARDOWN:
 755		ret = __blk_trace_remove(q);
 756		break;
 757	default:
 758		ret = -ENOTTY;
 759		break;
 760	}
 761
 762	mutex_unlock(&q->debugfs_mutex);
 763	return ret;
 764}
 765
 766/**
 767 * blk_trace_shutdown - stop and cleanup trace structures
 768 * @q:    the request queue associated with the device
 769 *
 770 **/
 771void blk_trace_shutdown(struct request_queue *q)
 772{
 773	if (rcu_dereference_protected(q->blk_trace,
 774				      lockdep_is_held(&q->debugfs_mutex)))
 775		__blk_trace_remove(q);
 776}
 777
 778#ifdef CONFIG_BLK_CGROUP
 779static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 780{
 781	struct cgroup_subsys_state *blkcg_css;
 782	struct blk_trace *bt;
 783
 784	/* We don't use the 'bt' value here except as an optimization... */
 785	bt = rcu_dereference_protected(q->blk_trace, 1);
 786	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 787		return 0;
 788
 789	blkcg_css = bio_blkcg_css(bio);
 790	if (!blkcg_css)
 791		return 0;
 792	return cgroup_id(blkcg_css->cgroup);
 793}
 794#else
 795static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 796{
 797	return 0;
 798}
 799#endif
 800
 801static u64
 802blk_trace_request_get_cgid(struct request *rq)
 803{
 804	if (!rq->bio)
 805		return 0;
 806	/* Use the first bio */
 807	return blk_trace_bio_get_cgid(rq->q, rq->bio);
 808}
 809
 810/*
 811 * blktrace probes
 812 */
 813
 814/**
 815 * blk_add_trace_rq - Add a trace for a request oriented action
 816 * @rq:		the source request
 817 * @error:	return status to log
 818 * @nr_bytes:	number of completed bytes
 819 * @what:	the action
 820 * @cgid:	the cgroup info
 821 *
 822 * Description:
 823 *     Records an action against a request. Will log the bio offset + size.
 824 *
 825 **/
 826static void blk_add_trace_rq(struct request *rq, blk_status_t error,
 827			     unsigned int nr_bytes, u32 what, u64 cgid)
 828{
 829	struct blk_trace *bt;
 830
 831	rcu_read_lock();
 832	bt = rcu_dereference(rq->q->blk_trace);
 833	if (likely(!bt)) {
 834		rcu_read_unlock();
 835		return;
 836	}
 837
 838	if (blk_rq_is_passthrough(rq))
 839		what |= BLK_TC_ACT(BLK_TC_PC);
 840	else
 841		what |= BLK_TC_ACT(BLK_TC_FS);
 842
 843	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
 844			what, blk_status_to_errno(error), 0, NULL, cgid);
 845	rcu_read_unlock();
 846}
 847
 848static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
 849{
 850	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
 851			 blk_trace_request_get_cgid(rq));
 852}
 853
 854static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
 855{
 856	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
 857			 blk_trace_request_get_cgid(rq));
 858}
 859
 860static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
 861{
 862	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
 863			 blk_trace_request_get_cgid(rq));
 864}
 865
 866static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
 867{
 868	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
 869			 blk_trace_request_get_cgid(rq));
 870}
 871
 872static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 873			blk_status_t error, unsigned int nr_bytes)
 874{
 875	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
 876			 blk_trace_request_get_cgid(rq));
 877}
 878
 879/**
 880 * blk_add_trace_bio - Add a trace for a bio oriented action
 881 * @q:		queue the io is for
 882 * @bio:	the source bio
 883 * @what:	the action
 884 * @error:	error, if any
 885 *
 886 * Description:
 887 *     Records an action against a bio. Will log the bio offset + size.
 888 *
 889 **/
 890static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 891			      u32 what, int error)
 892{
 893	struct blk_trace *bt;
 894
 895	rcu_read_lock();
 896	bt = rcu_dereference(q->blk_trace);
 897	if (likely(!bt)) {
 898		rcu_read_unlock();
 899		return;
 900	}
 901
 902	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 903			bio->bi_opf, what, error, 0, NULL,
 904			blk_trace_bio_get_cgid(q, bio));
 905	rcu_read_unlock();
 906}
 907
 908static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
 909{
 910	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
 911}
 912
 913static void blk_add_trace_bio_complete(void *ignore,
 914				       struct request_queue *q, struct bio *bio)
 915{
 916	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
 917			  blk_status_to_errno(bio->bi_status));
 918}
 919
 920static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
 921{
 922	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
 923			0);
 924}
 925
 926static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
 927{
 928	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
 929			0);
 930}
 931
 932static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
 933{
 934	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
 935}
 936
 937static void blk_add_trace_getrq(void *ignore, struct bio *bio)
 938{
 939	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
 940}
 941
 942static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 943{
 944	struct blk_trace *bt;
 945
 946	rcu_read_lock();
 947	bt = rcu_dereference(q->blk_trace);
 948	if (bt)
 949		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 950	rcu_read_unlock();
 951}
 952
 953static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 954				    unsigned int depth, bool explicit)
 955{
 956	struct blk_trace *bt;
 957
 958	rcu_read_lock();
 959	bt = rcu_dereference(q->blk_trace);
 960	if (bt) {
 961		__be64 rpdu = cpu_to_be64(depth);
 962		u32 what;
 963
 964		if (explicit)
 965			what = BLK_TA_UNPLUG_IO;
 966		else
 967			what = BLK_TA_UNPLUG_TIMER;
 968
 969		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
 970	}
 971	rcu_read_unlock();
 972}
 973
 974static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
 975{
 976	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 977	struct blk_trace *bt;
 978
 979	rcu_read_lock();
 980	bt = rcu_dereference(q->blk_trace);
 981	if (bt) {
 982		__be64 rpdu = cpu_to_be64(pdu);
 983
 984		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 985				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
 986				blk_status_to_errno(bio->bi_status),
 987				sizeof(rpdu), &rpdu,
 988				blk_trace_bio_get_cgid(q, bio));
 989	}
 990	rcu_read_unlock();
 991}
 992
 993/**
 994 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 995 * @ignore:	trace callback data parameter (not used)
 996 * @bio:	the source bio
 997 * @dev:	source device
 998 * @from:	source sector
 999 *
1000 * Called after a bio is remapped to a different device and/or sector.
1001 **/
1002static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
1003				    sector_t from)
1004{
1005	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1006	struct blk_trace *bt;
1007	struct blk_io_trace_remap r;
1008
1009	rcu_read_lock();
1010	bt = rcu_dereference(q->blk_trace);
1011	if (likely(!bt)) {
1012		rcu_read_unlock();
1013		return;
1014	}
1015
1016	r.device_from = cpu_to_be32(dev);
1017	r.device_to   = cpu_to_be32(bio_dev(bio));
1018	r.sector_from = cpu_to_be64(from);
1019
1020	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1021			bio->bi_opf, BLK_TA_REMAP,
1022			blk_status_to_errno(bio->bi_status),
1023			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1024	rcu_read_unlock();
1025}
1026
1027/**
1028 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1029 * @ignore:	trace callback data parameter (not used)
1030 * @rq:		the source request
1031 * @dev:	target device
1032 * @from:	source sector
1033 *
1034 * Description:
1035 *     Device mapper remaps request to other devices.
1036 *     Add a trace for that action.
1037 *
1038 **/
1039static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
1040				   sector_t from)
1041{
1042	struct blk_trace *bt;
1043	struct blk_io_trace_remap r;
1044
1045	rcu_read_lock();
1046	bt = rcu_dereference(rq->q->blk_trace);
1047	if (likely(!bt)) {
1048		rcu_read_unlock();
1049		return;
1050	}
1051
1052	r.device_from = cpu_to_be32(dev);
1053	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
1054	r.sector_from = cpu_to_be64(from);
1055
1056	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1057			rq->cmd_flags, BLK_TA_REMAP, 0,
1058			sizeof(r), &r, blk_trace_request_get_cgid(rq));
1059	rcu_read_unlock();
1060}
1061
1062/**
1063 * blk_add_driver_data - Add binary message with driver-specific data
1064 * @rq:		io request
1065 * @data:	driver-specific data
1066 * @len:	length of driver-specific data
1067 *
1068 * Description:
1069 *     Some drivers might want to write driver-specific data per request.
1070 *
1071 **/
1072void blk_add_driver_data(struct request *rq, void *data, size_t len)
1073{
1074	struct blk_trace *bt;
1075
1076	rcu_read_lock();
1077	bt = rcu_dereference(rq->q->blk_trace);
1078	if (likely(!bt)) {
1079		rcu_read_unlock();
1080		return;
1081	}
1082
1083	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
1084				BLK_TA_DRV_DATA, 0, len, data,
1085				blk_trace_request_get_cgid(rq));
1086	rcu_read_unlock();
1087}
1088EXPORT_SYMBOL_GPL(blk_add_driver_data);
1089
1090static void blk_register_tracepoints(void)
1091{
1092	int ret;
1093
1094	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1095	WARN_ON(ret);
1096	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1097	WARN_ON(ret);
1098	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1099	WARN_ON(ret);
1100	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1101	WARN_ON(ret);
1102	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1103	WARN_ON(ret);
1104	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1105	WARN_ON(ret);
1106	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1107	WARN_ON(ret);
1108	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1109	WARN_ON(ret);
1110	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1111	WARN_ON(ret);
1112	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1113	WARN_ON(ret);
1114	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1115	WARN_ON(ret);
1116	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1117	WARN_ON(ret);
1118	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1119	WARN_ON(ret);
1120	ret = register_trace_block_split(blk_add_trace_split, NULL);
1121	WARN_ON(ret);
1122	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1123	WARN_ON(ret);
1124	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1125	WARN_ON(ret);
1126}
1127
1128static void blk_unregister_tracepoints(void)
1129{
1130	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1131	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1132	unregister_trace_block_split(blk_add_trace_split, NULL);
1133	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1134	unregister_trace_block_plug(blk_add_trace_plug, NULL);
1135	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1136	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1137	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1138	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1139	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1140	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1141	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1142	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1143	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1144	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1145	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1146
1147	tracepoint_synchronize_unregister();
1148}
1149
1150/*
1151 * struct blk_io_tracer formatting routines
1152 */
1153
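/*
 * Build the "rwbs" flag string (the short flags column that blkparse
 * prints): an optional leading 'F' for flush, one op letter (D, W, R
 * or N), then any of the FUA 'F', readahead 'A', sync 'S' and
 * metadata 'M' modifiers.
 */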
1154static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1155{
1156	int i = 0;
1157	int tc = t->action >> BLK_TC_SHIFT;
1158
1159	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1160		rwbs[i++] = 'N';
1161		goto out;
1162	}
1163
1164	if (tc & BLK_TC_FLUSH)
1165		rwbs[i++] = 'F';
1166
1167	if (tc & BLK_TC_DISCARD)
1168		rwbs[i++] = 'D';
1169	else if (tc & BLK_TC_WRITE)
1170		rwbs[i++] = 'W';
1171	else if (t->bytes)
1172		rwbs[i++] = 'R';
1173	else
1174		rwbs[i++] = 'N';
1175
1176	if (tc & BLK_TC_FUA)
1177		rwbs[i++] = 'F';
1178	if (tc & BLK_TC_AHEAD)
1179		rwbs[i++] = 'A';
1180	if (tc & BLK_TC_SYNC)
1181		rwbs[i++] = 'S';
1182	if (tc & BLK_TC_META)
1183		rwbs[i++] = 'M';
1184out:
1185	rwbs[i] = '\0';
1186}
1187
1188static inline
1189const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1190{
1191	return (const struct blk_io_trace *)ent;
1192}
1193
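/*
 * Payload accessors: an optional 64-bit cgroup id is stored directly
 * behind struct blk_io_trace, followed by the pdu proper, mirroring
 * the memcpy() layout in __blk_add_trace().
 */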
1194static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1195{
1196	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
1197}
1198
1199static inline u64 t_cgid(const struct trace_entry *ent)
1200{
1201	return *(u64 *)(te_blk_io_trace(ent) + 1);
1202}
1203
1204static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1205{
1206	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
1207}
1208
1209static inline u32 t_action(const struct trace_entry *ent)
1210{
1211	return te_blk_io_trace(ent)->action;
1212}
1213
1214static inline u32 t_bytes(const struct trace_entry *ent)
1215{
1216	return te_blk_io_trace(ent)->bytes;
1217}
1218
1219static inline u32 t_sec(const struct trace_entry *ent)
1220{
1221	return te_blk_io_trace(ent)->bytes >> 9;
1222}
1223
1224static inline unsigned long long t_sector(const struct trace_entry *ent)
1225{
1226	return te_blk_io_trace(ent)->sector;
1227}
1228
1229static inline __u16 t_error(const struct trace_entry *ent)
1230{
1231	return te_blk_io_trace(ent)->error;
1232}
1233
1234static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1235{
1236	const __be64 *val = pdu_start(ent, has_cg);
1237	return be64_to_cpu(*val);
1238}
1239
1240typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1241	bool has_cg);
1242
1243static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1244	bool has_cg)
1245{
1246	char rwbs[RWBS_LEN];
1247	unsigned long long ts  = iter->ts;
1248	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1249	unsigned secs	       = (unsigned long)ts;
1250	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1251
1252	fill_rwbs(rwbs, t);
1253
1254	trace_seq_printf(&iter->seq,
1255			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1256			 MAJOR(t->device), MINOR(t->device), iter->cpu,
1257			 secs, nsec_rem, iter->ent->pid, act, rwbs);
1258}
1259
1260static void blk_log_action(struct trace_iterator *iter, const char *act,
1261	bool has_cg)
1262{
1263	char rwbs[RWBS_LEN];
1264	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1265
1266	fill_rwbs(rwbs, t);
1267	if (has_cg) {
1268		u64 id = t_cgid(iter->ent);
1269
1270		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1271			char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1272
1273			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1274				sizeof(blkcg_name_buf));
1275			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1276				 MAJOR(t->device), MINOR(t->device),
1277				 blkcg_name_buf, act, rwbs);
1278		} else {
1279			/*
1280			 * The cgid portion used to be "INO,GEN".  Userland
1281			 * builds a FILEID_INO32_GEN fid out of them and
1282			 * opens the cgroup using open_by_handle_at(2).
1283			 * While 32bit ino setups are still the same, 64bit
1284			 * ones now use the 64bit ino as the whole ID and
1285			 * no longer use generation.
1286			 *
1287			 * Regardless of the content, always output
1288			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
1289			 * be mapped back to @id on both 64 and 32bit ino
1290			 * setups.  See __kernfs_fh_to_dentry().
1291			 */
1292			trace_seq_printf(&iter->seq,
1293				 "%3d,%-3d %llx,%-llx %2s %3s ",
1294				 MAJOR(t->device), MINOR(t->device),
1295				 id & U32_MAX, id >> 32, act, rwbs);
1296		}
1297	} else
1298		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1299				 MAJOR(t->device), MINOR(t->device), act, rwbs);
1300}
1301
1302static void blk_log_dump_pdu(struct trace_seq *s,
1303	const struct trace_entry *ent, bool has_cg)
1304{
1305	const unsigned char *pdu_buf;
1306	int pdu_len;
1307	int i, end;
1308
1309	pdu_buf = pdu_start(ent, has_cg);
1310	pdu_len = pdu_real_len(ent, has_cg);
1311
1312	if (!pdu_len)
1313		return;
1314
 1315	/* find the last zero that needs to be printed; the rest is elided as ".." */
1316	for (end = pdu_len - 1; end >= 0; end--)
1317		if (pdu_buf[end])
1318			break;
1319	end++;
1320
1321	trace_seq_putc(s, '(');
1322
1323	for (i = 0; i < pdu_len; i++) {
1324
1325		trace_seq_printf(s, "%s%02x",
1326				 i == 0 ? "" : " ", pdu_buf[i]);
1327
1328		/*
1329		 * stop when the rest is just zeros and indicate so
1330		 * with a ".." appended
1331		 */
1332		if (i == end && end != pdu_len - 1) {
1333			trace_seq_puts(s, " ..) ");
1334			return;
1335		}
1336	}
1337
1338	trace_seq_puts(s, ") ");
1339}
1340
1341static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1342{
1343	char cmd[TASK_COMM_LEN];
1344
1345	trace_find_cmdline(ent->pid, cmd);
1346
1347	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1348		trace_seq_printf(s, "%u ", t_bytes(ent));
1349		blk_log_dump_pdu(s, ent, has_cg);
1350		trace_seq_printf(s, "[%s]\n", cmd);
1351	} else {
1352		if (t_sec(ent))
1353			trace_seq_printf(s, "%llu + %u [%s]\n",
1354						t_sector(ent), t_sec(ent), cmd);
1355		else
1356			trace_seq_printf(s, "[%s]\n", cmd);
1357	}
1358}
1359
1360static void blk_log_with_error(struct trace_seq *s,
1361			      const struct trace_entry *ent, bool has_cg)
1362{
1363	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1364		blk_log_dump_pdu(s, ent, has_cg);
1365		trace_seq_printf(s, "[%d]\n", t_error(ent));
1366	} else {
1367		if (t_sec(ent))
1368			trace_seq_printf(s, "%llu + %u [%d]\n",
1369					 t_sector(ent),
1370					 t_sec(ent), t_error(ent));
1371		else
1372			trace_seq_printf(s, "%llu [%d]\n",
1373					 t_sector(ent), t_error(ent));
1374	}
1375}
1376
1377static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1378{
1379	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1380
1381	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1382			 t_sector(ent), t_sec(ent),
1383			 MAJOR(be32_to_cpu(__r->device_from)),
1384			 MINOR(be32_to_cpu(__r->device_from)),
1385			 be64_to_cpu(__r->sector_from));
1386}
1387
1388static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1389{
1390	char cmd[TASK_COMM_LEN];
1391
1392	trace_find_cmdline(ent->pid, cmd);
1393
1394	trace_seq_printf(s, "[%s]\n", cmd);
1395}
1396
1397static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1398{
1399	char cmd[TASK_COMM_LEN];
1400
1401	trace_find_cmdline(ent->pid, cmd);
1402
1403	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1404}
1405
1406static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1407{
1408	char cmd[TASK_COMM_LEN];
1409
1410	trace_find_cmdline(ent->pid, cmd);
1411
1412	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1413			 get_pdu_int(ent, has_cg), cmd);
1414}
1415
1416static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1417			bool has_cg)
1418{
1419
1420	trace_seq_putmem(s, pdu_start(ent, has_cg),
1421		pdu_real_len(ent, has_cg));
1422	trace_seq_putc(s, '\n');
1423}
1424
1425/*
1426 * struct tracer operations
1427 */
1428
1429static void blk_tracer_print_header(struct seq_file *m)
1430{
1431	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1432		return;
1433	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1434		    "#  |     |     |           |   |   |\n");
1435}
1436
1437static void blk_tracer_start(struct trace_array *tr)
1438{
1439	blk_tracer_enabled = true;
1440}
1441
1442static int blk_tracer_init(struct trace_array *tr)
1443{
1444	blk_tr = tr;
1445	blk_tracer_start(tr);
1446	return 0;
1447}
1448
1449static void blk_tracer_stop(struct trace_array *tr)
1450{
1451	blk_tracer_enabled = false;
1452}
1453
1454static void blk_tracer_reset(struct trace_array *tr)
1455{
1456	blk_tracer_stop(tr);
1457}
1458
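/*
 * Map the low (action) bits of a trace entry to its terse/verbose name
 * and to the routine that formats its payload.
 */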
1459static const struct {
1460	const char *act[2];
1461	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
1462			    bool has_cg);
1463} what2act[] = {
1464	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
1465	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
1466	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
1467	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
1468	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
1469	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
1470	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
1471	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
1472	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
1473	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
1474	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
1475	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
1476	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
1477	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
1478	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
1479};
1480
1481static enum print_line_t print_one_line(struct trace_iterator *iter,
1482					bool classic)
1483{
1484	struct trace_array *tr = iter->tr;
1485	struct trace_seq *s = &iter->seq;
1486	const struct blk_io_trace *t;
1487	u16 what;
1488	bool long_act;
1489	blk_log_action_t *log_action;
1490	bool has_cg;
1491
1492	t	   = te_blk_io_trace(iter->ent);
1493	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1494	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1495	log_action = classic ? &blk_log_action_classic : &blk_log_action;
1496	has_cg	   = t->action & __BLK_TA_CGROUP;
1497
1498	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1499		log_action(iter, long_act ? "message" : "m", has_cg);
1500		blk_log_msg(s, iter->ent, has_cg);
1501		return trace_handle_return(s);
1502	}
1503
1504	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1505		trace_seq_printf(s, "Unknown action %x\n", what);
1506	else {
1507		log_action(iter, what2act[what].act[long_act], has_cg);
1508		what2act[what].print(s, iter->ent, has_cg);
1509	}
1510
1511	return trace_handle_return(s);
1512}
1513
1514static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1515					       int flags, struct trace_event *event)
1516{
1517	return print_one_line(iter, false);
1518}
1519
1520static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1521{
1522	struct trace_seq *s = &iter->seq;
1523	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1524	const int offset = offsetof(struct blk_io_trace, sector);
1525	struct blk_io_trace old = {
1526		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1527		.time     = iter->ts,
1528	};
1529
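	/*
	 * Re-emit the event in the on-disk blk_io_trace layout: a freshly
	 * built header up to the 'sector' field, then the rest of the
	 * original record (including its pdu) verbatim.
	 */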
1530	trace_seq_putmem(s, &old, offset);
1531	trace_seq_putmem(s, &t->sector,
1532			 sizeof(old) - offset + t->pdu_len);
1533}
1534
1535static enum print_line_t
1536blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1537			     struct trace_event *event)
1538{
1539	blk_trace_synthesize_old_trace(iter);
1540
1541	return trace_handle_return(&iter->seq);
1542}
1543
1544static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1545{
1546	if ((iter->ent->type != TRACE_BLK) ||
1547	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1548		return TRACE_TYPE_UNHANDLED;
1549
1550	return print_one_line(iter, true);
1551}
1552
1553static int
1554blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1555{
1556	/* don't output context-info for blk_classic output */
1557	if (bit == TRACE_BLK_OPT_CLASSIC) {
1558		if (set)
1559			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1560		else
1561			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1562	}
1563	return 0;
1564}
1565
1566static struct tracer blk_tracer __read_mostly = {
1567	.name		= "blk",
1568	.init		= blk_tracer_init,
1569	.reset		= blk_tracer_reset,
1570	.start		= blk_tracer_start,
1571	.stop		= blk_tracer_stop,
1572	.print_header	= blk_tracer_print_header,
1573	.print_line	= blk_tracer_print_line,
1574	.flags		= &blk_tracer_flags,
1575	.set_flag	= blk_tracer_set_flag,
1576};
1577
1578static struct trace_event_functions trace_blk_event_funcs = {
1579	.trace		= blk_trace_event_print,
1580	.binary		= blk_trace_event_print_binary,
1581};
1582
1583static struct trace_event trace_blk_event = {
1584	.type		= TRACE_BLK,
1585	.funcs		= &trace_blk_event_funcs,
1586};
1587
1588static int __init init_blk_tracer(void)
1589{
1590	if (!register_trace_event(&trace_blk_event)) {
1591		pr_warn("Warning: could not register block events\n");
1592		return 1;
1593	}
1594
1595	if (register_tracer(&blk_tracer) != 0) {
1596		pr_warn("Warning: could not register the block tracer\n");
1597		unregister_trace_event(&trace_blk_event);
1598		return 1;
1599	}
1600
1601	return 0;
1602}
1603
1604device_initcall(init_blk_tracer);
1605
1606static int blk_trace_remove_queue(struct request_queue *q)
1607{
1608	struct blk_trace *bt;
1609
1610	bt = rcu_replace_pointer(q->blk_trace, NULL,
1611				 lockdep_is_held(&q->debugfs_mutex));
1612	if (bt == NULL)
1613		return -EINVAL;
1614
1615	blk_trace_stop(bt);
1616
1617	put_probe_ref();
1618	synchronize_rcu();
1619	blk_trace_free(q, bt);
1620	return 0;
1621}
1622
1623/*
1624 * Setup everything required to start tracing
1625 */
1626static int blk_trace_setup_queue(struct request_queue *q,
1627				 struct block_device *bdev)
1628{
1629	struct blk_trace *bt = NULL;
1630	int ret = -ENOMEM;
1631
1632	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1633	if (!bt)
1634		return -ENOMEM;
1635
1636	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1637	if (!bt->msg_data)
1638		goto free_bt;
1639
1640	bt->dev = bdev->bd_dev;
1641	bt->act_mask = (u16)-1;
1642
1643	blk_trace_setup_lba(bt, bdev);
1644
1645	rcu_assign_pointer(q->blk_trace, bt);
1646	get_probe_ref();
1647	return 0;
1648
1649free_bt:
1650	blk_trace_free(q, bt);
1651	return ret;
1652}
1653
1654/*
1655 * sysfs interface to enable and configure tracing
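 * (the "trace" attribute group below shows up as
 * /sys/block/<disk>/trace/{enable,act_mask,pid,start_lba,end_lba})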
1656 */
1657
1658static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1659					 struct device_attribute *attr,
1660					 char *buf);
1661static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1662					  struct device_attribute *attr,
1663					  const char *buf, size_t count);
1664#define BLK_TRACE_DEVICE_ATTR(_name) \
1665	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1666		    sysfs_blk_trace_attr_show, \
1667		    sysfs_blk_trace_attr_store)
1668
1669static BLK_TRACE_DEVICE_ATTR(enable);
1670static BLK_TRACE_DEVICE_ATTR(act_mask);
1671static BLK_TRACE_DEVICE_ATTR(pid);
1672static BLK_TRACE_DEVICE_ATTR(start_lba);
1673static BLK_TRACE_DEVICE_ATTR(end_lba);
1674
1675static struct attribute *blk_trace_attrs[] = {
1676	&dev_attr_enable.attr,
1677	&dev_attr_act_mask.attr,
1678	&dev_attr_pid.attr,
1679	&dev_attr_start_lba.attr,
1680	&dev_attr_end_lba.attr,
1681	NULL
1682};
1683
1684struct attribute_group blk_trace_attr_group = {
1685	.name  = "trace",
1686	.attrs = blk_trace_attrs,
1687};
1688
1689static const struct {
1690	int mask;
1691	const char *str;
1692} mask_maps[] = {
1693	{ BLK_TC_READ,		"read"		},
1694	{ BLK_TC_WRITE,		"write"		},
1695	{ BLK_TC_FLUSH,		"flush"		},
1696	{ BLK_TC_SYNC,		"sync"		},
1697	{ BLK_TC_QUEUE,		"queue"		},
1698	{ BLK_TC_REQUEUE,	"requeue"	},
1699	{ BLK_TC_ISSUE,		"issue"		},
1700	{ BLK_TC_COMPLETE,	"complete"	},
1701	{ BLK_TC_FS,		"fs"		},
1702	{ BLK_TC_PC,		"pc"		},
1703	{ BLK_TC_NOTIFY,	"notify"	},
1704	{ BLK_TC_AHEAD,		"ahead"		},
1705	{ BLK_TC_META,		"meta"		},
1706	{ BLK_TC_DISCARD,	"discard"	},
1707	{ BLK_TC_DRV_DATA,	"drv_data"	},
1708	{ BLK_TC_FUA,		"fua"		},
1709};
1710
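/*
 * Parse a comma-separated list of category names, e.g. "read,write,sync",
 * into a BLK_TC_* mask; any unknown token invalidates the whole string.
 */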
1711static int blk_trace_str2mask(const char *str)
1712{
1713	int i;
1714	int mask = 0;
1715	char *buf, *s, *token;
1716
1717	buf = kstrdup(str, GFP_KERNEL);
1718	if (buf == NULL)
1719		return -ENOMEM;
1720	s = strstrip(buf);
1721
1722	while (1) {
1723		token = strsep(&s, ",");
1724		if (token == NULL)
1725			break;
1726
1727		if (*token == '\0')
1728			continue;
1729
1730		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1731			if (strcasecmp(token, mask_maps[i].str) == 0) {
1732				mask |= mask_maps[i].mask;
1733				break;
1734			}
1735		}
1736		if (i == ARRAY_SIZE(mask_maps)) {
1737			mask = -EINVAL;
1738			break;
1739		}
1740	}
1741	kfree(buf);
1742
1743	return mask;
1744}
1745
1746static ssize_t blk_trace_mask2str(char *buf, int mask)
1747{
1748	int i;
1749	char *p = buf;
1750
1751	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1752		if (mask & mask_maps[i].mask) {
1753			p += sprintf(p, "%s%s",
1754				    (p == buf) ? "" : ",", mask_maps[i].str);
1755		}
1756	}
1757	*p++ = '\n';
1758
1759	return p - buf;
1760}
1761
1762static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1763					 struct device_attribute *attr,
1764					 char *buf)
1765{
1766	struct block_device *bdev = dev_to_bdev(dev);
1767	struct request_queue *q = bdev_get_queue(bdev);
1768	struct blk_trace *bt;
1769	ssize_t ret = -ENXIO;
1770
1771	mutex_lock(&q->debugfs_mutex);
1772
1773	bt = rcu_dereference_protected(q->blk_trace,
1774				       lockdep_is_held(&q->debugfs_mutex));
1775	if (attr == &dev_attr_enable) {
1776		ret = sprintf(buf, "%u\n", !!bt);
1777		goto out_unlock_bdev;
1778	}
1779
1780	if (bt == NULL)
1781		ret = sprintf(buf, "disabled\n");
1782	else if (attr == &dev_attr_act_mask)
1783		ret = blk_trace_mask2str(buf, bt->act_mask);
1784	else if (attr == &dev_attr_pid)
1785		ret = sprintf(buf, "%u\n", bt->pid);
1786	else if (attr == &dev_attr_start_lba)
1787		ret = sprintf(buf, "%llu\n", bt->start_lba);
1788	else if (attr == &dev_attr_end_lba)
1789		ret = sprintf(buf, "%llu\n", bt->end_lba);
1790
1791out_unlock_bdev:
1792	mutex_unlock(&q->debugfs_mutex);
1793	return ret;
1794}
1795
1796static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1797					  struct device_attribute *attr,
1798					  const char *buf, size_t count)
1799{
1800	struct block_device *bdev = dev_to_bdev(dev);
1801	struct request_queue *q = bdev_get_queue(bdev);
1802	struct blk_trace *bt;
1803	u64 value;
1804	ssize_t ret = -EINVAL;
1805
1806	if (count == 0)
1807		goto out;
1808
1809	if (attr == &dev_attr_act_mask) {
1810		if (kstrtoull(buf, 0, &value)) {
1811			/* Assume it is a list of trace category names */
1812			ret = blk_trace_str2mask(buf);
1813			if (ret < 0)
1814				goto out;
1815			value = ret;
1816		}
1817	} else {
1818		if (kstrtoull(buf, 0, &value))
1819			goto out;
1820	}
1821
1822	mutex_lock(&q->debugfs_mutex);
1823
1824	bt = rcu_dereference_protected(q->blk_trace,
1825				       lockdep_is_held(&q->debugfs_mutex));
1826	if (attr == &dev_attr_enable) {
1827		if (!!value == !!bt) {
1828			ret = 0;
1829			goto out_unlock_bdev;
1830		}
1831		if (value)
1832			ret = blk_trace_setup_queue(q, bdev);
1833		else
1834			ret = blk_trace_remove_queue(q);
1835		goto out_unlock_bdev;
1836	}
1837
1838	ret = 0;
1839	if (bt == NULL) {
1840		ret = blk_trace_setup_queue(q, bdev);
1841		bt = rcu_dereference_protected(q->blk_trace,
1842				lockdep_is_held(&q->debugfs_mutex));
1843	}
1844
1845	if (ret == 0) {
1846		if (attr == &dev_attr_act_mask)
1847			bt->act_mask = value;
1848		else if (attr == &dev_attr_pid)
1849			bt->pid = value;
1850		else if (attr == &dev_attr_start_lba)
1851			bt->start_lba = value;
1852		else if (attr == &dev_attr_end_lba)
1853			bt->end_lba = value;
1854	}
1855
1856out_unlock_bdev:
1857	mutex_unlock(&q->debugfs_mutex);
1858out:
1859	return ret ? ret : count;
1860}
1861#endif /* CONFIG_BLK_DEV_IO_TRACE */
1862
1863#ifdef CONFIG_EVENT_TRACING
1864
1865/**
1866 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
1867 * @rwbs:	buffer to be filled
1868 * @opf:	request operation type (REQ_OP_XXX) and flags for the tracepoint
1869 *
1870 * Description:
1871 *     Maps each request operation and flag to a single character and fills the
1872 *     buffer provided by the caller with resulting string.
1873 *
1874 **/
1875void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
1876{
1877	int i = 0;
1878
1879	if (opf & REQ_PREFLUSH)
1880		rwbs[i++] = 'F';
1881
1882	switch (opf & REQ_OP_MASK) {
1883	case REQ_OP_WRITE:
1884		rwbs[i++] = 'W';
1885		break;
1886	case REQ_OP_DISCARD:
1887		rwbs[i++] = 'D';
1888		break;
1889	case REQ_OP_SECURE_ERASE:
1890		rwbs[i++] = 'D';
1891		rwbs[i++] = 'E';
1892		break;
1893	case REQ_OP_FLUSH:
1894		rwbs[i++] = 'F';
1895		break;
1896	case REQ_OP_READ:
1897		rwbs[i++] = 'R';
1898		break;
1899	default:
1900		rwbs[i++] = 'N';
1901	}
1902
1903	if (opf & REQ_FUA)
1904		rwbs[i++] = 'F';
1905	if (opf & REQ_RAHEAD)
1906		rwbs[i++] = 'A';
1907	if (opf & REQ_SYNC)
1908		rwbs[i++] = 'S';
1909	if (opf & REQ_META)
1910		rwbs[i++] = 'M';
1911
1912	rwbs[i] = '\0';
1913}
1914EXPORT_SYMBOL_GPL(blk_fill_rwbs);
1915
1916#endif /* CONFIG_EVENT_TRACING */
1917
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
   4 *
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/blkdev.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/percpu.h>
  13#include <linux/init.h>
  14#include <linux/mutex.h>
  15#include <linux/slab.h>
  16#include <linux/debugfs.h>
  17#include <linux/export.h>
  18#include <linux/time.h>
  19#include <linux/uaccess.h>
  20#include <linux/list.h>
  21#include <linux/blk-cgroup.h>
  22
  23#include "../../block/blk.h"
  24
  25#include <trace/events/block.h>
  26
  27#include "trace_output.h"
  28
  29#ifdef CONFIG_BLK_DEV_IO_TRACE
  30
  31static unsigned int blktrace_seq __read_mostly = 1;
  32
  33static struct trace_array *blk_tr;
  34static bool blk_tracer_enabled __read_mostly;
  35
  36static LIST_HEAD(running_trace_list);
  37static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
  38
  39/* Select an alternative, minimalistic output than the original one */
  40#define TRACE_BLK_OPT_CLASSIC	0x1
  41#define TRACE_BLK_OPT_CGROUP	0x2
  42#define TRACE_BLK_OPT_CGNAME	0x4
  43
  44static struct tracer_opt blk_tracer_opts[] = {
  45	/* Default disable the minimalistic output */
  46	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
  47#ifdef CONFIG_BLK_CGROUP
  48	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
  49	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
  50#endif
  51	{ }
  52};
  53
  54static struct tracer_flags blk_tracer_flags = {
  55	.val  = 0,
  56	.opts = blk_tracer_opts,
  57};
  58
  59/* Global reference count of probes */
  60static DEFINE_MUTEX(blk_probe_mutex);
  61static int blk_probes_ref;
  62
  63static void blk_register_tracepoints(void);
  64static void blk_unregister_tracepoints(void);
  65
  66/*
  67 * Send out a notify message.
  68 */
  69static void trace_note(struct blk_trace *bt, pid_t pid, int action,
  70		       const void *data, size_t len, u64 cgid)
  71{
  72	struct blk_io_trace *t;
  73	struct ring_buffer_event *event = NULL;
  74	struct trace_buffer *buffer = NULL;
  75	int pc = 0;
  76	int cpu = smp_processor_id();
  77	bool blk_tracer = blk_tracer_enabled;
  78	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
  79
  80	if (blk_tracer) {
  81		buffer = blk_tr->array_buffer.buffer;
  82		pc = preempt_count();
  83		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
  84						  sizeof(*t) + len + cgid_len,
  85						  0, pc);
  86		if (!event)
  87			return;
  88		t = ring_buffer_event_data(event);
  89		goto record_it;
  90	}
  91
  92	if (!bt->rchan)
  93		return;
  94
  95	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
  96	if (t) {
  97		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
  98		t->time = ktime_to_ns(ktime_get());
  99record_it:
 100		t->device = bt->dev;
 101		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
 102		t->pid = pid;
 103		t->cpu = cpu;
 104		t->pdu_len = len + cgid_len;
 105		if (cgid_len)
 106			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 107		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 108
 109		if (blk_tracer)
 110			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 111	}
 112}
 113
 114/*
 115 * Send out a notify for this process, if we haven't done so since a trace
 116 * started
 117 */
 118static void trace_note_tsk(struct task_struct *tsk)
 119{
 120	unsigned long flags;
 121	struct blk_trace *bt;
 122
 123	tsk->btrace_seq = blktrace_seq;
 124	spin_lock_irqsave(&running_trace_lock, flags);
 125	list_for_each_entry(bt, &running_trace_list, running_list) {
 126		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 127			   sizeof(tsk->comm), 0);
 128	}
 129	spin_unlock_irqrestore(&running_trace_lock, flags);
 130}
 131
 132static void trace_note_time(struct blk_trace *bt)
 133{
 134	struct timespec64 now;
 135	unsigned long flags;
 136	u32 words[2];
 137
 138	/* need to check user space to see if this breaks in y2038 or y2106 */
 139	ktime_get_real_ts64(&now);
 140	words[0] = (u32)now.tv_sec;
 141	words[1] = now.tv_nsec;
 142
 143	local_irq_save(flags);
 144	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
 145	local_irq_restore(flags);
 146}
 147
 148void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
 149	const char *fmt, ...)
 150{
 151	int n;
 152	va_list args;
 153	unsigned long flags;
 154	char *buf;
 155
 156	if (unlikely(bt->trace_state != Blktrace_running &&
 157		     !blk_tracer_enabled))
 158		return;
 159
 160	/*
 161	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
 162	 * message to the trace.
 163	 */
 164	if (!(bt->act_mask & BLK_TC_NOTIFY))
 165		return;
 166
 167	local_irq_save(flags);
 168	buf = this_cpu_ptr(bt->msg_data);
 169	va_start(args, fmt);
 170	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 171	va_end(args);
 172
 173	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 174		blkcg = NULL;
 175#ifdef CONFIG_BLK_CGROUP
 176	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
 177		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
 178#else
 179	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
 180#endif
 181	local_irq_restore(flags);
 182}
 183EXPORT_SYMBOL_GPL(__trace_note_message);
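/*
 * Illustrative sketch: in-tree callers normally reach this through the
 * blk_add_trace_msg() wrapper from <linux/blktrace_api.h>, which looks up
 * q->blk_trace under RCU before calling __trace_note_message(). The names
 * used here are made up:
 *
 *	blk_add_trace_msg(q, "resync pass %d starting", pass);
 *
 * The text is emitted as a BLK_TN_MESSAGE note and shows up in blkparse
 * output as a message event, subject to BLK_TC_NOTIFY in the action mask.
 */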
 184
 185static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 186			 pid_t pid)
 187{
 188	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
 189		return 1;
 190	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
 191		return 1;
 192	if (bt->pid && pid != bt->pid)
 193		return 1;
 194
 195	return 0;
 196}
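/*
 * Worked example (illustrative): with bt->act_mask == BLK_TC_READ, a write
 * event carries BLK_TC_ACT(BLK_TC_WRITE) in @what, so
 * (bt->act_mask << BLK_TC_SHIFT) & what is 0 and act_log_check() returns 1,
 * dropping the event. The LBA window and pid checks veto the same way;
 * returning 0 means "log it".
 */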
 197
 198/*
 199 * Data direction bit lookup
 200 */
 201static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 202				 BLK_TC_ACT(BLK_TC_WRITE) };
 203
 204#define BLK_TC_RAHEAD		BLK_TC_AHEAD
 205#define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 206
 207/* The ilog2() calls fall out because they're constant */
 208#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
 209	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
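/*
 * Worked example (illustrative): MASK_TC_BIT(op_flags, SYNC) moves the
 * REQ_SYNC bit from its position among the request flags directly onto
 * BLK_TC_SYNC within the upper action bits (BLK_TC_SHIFT == 16). The shift
 * distance is a compile-time constant, so a set flag lands on its category
 * bit without any conditional.
 */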
 210
 211/*
 212 * The worker for the various blk_add_trace*() types. Fills out a
 213 * blk_io_trace structure and places it in a per-cpu subbuffer.
 214 */
 215static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 216		     int op, int op_flags, u32 what, int error, int pdu_len,
 217		     void *pdu_data, u64 cgid)
 218{
 219	struct task_struct *tsk = current;
 220	struct ring_buffer_event *event = NULL;
 221	struct trace_buffer *buffer = NULL;
 222	struct blk_io_trace *t;
 223	unsigned long flags = 0;
 224	unsigned long *sequence;
 225	pid_t pid;
 226	int cpu, pc = 0;
 227	bool blk_tracer = blk_tracer_enabled;
 228	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 229
 230	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 231		return;
 232
 233	what |= ddir_act[op_is_write(op) ? WRITE : READ];
 234	what |= MASK_TC_BIT(op_flags, SYNC);
 235	what |= MASK_TC_BIT(op_flags, RAHEAD);
 236	what |= MASK_TC_BIT(op_flags, META);
 237	what |= MASK_TC_BIT(op_flags, PREFLUSH);
 238	what |= MASK_TC_BIT(op_flags, FUA);
 239	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 240		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 241	if (op == REQ_OP_FLUSH)
 242		what |= BLK_TC_ACT(BLK_TC_FLUSH);
 243	if (cgid)
 244		what |= __BLK_TA_CGROUP;
 245
 246	pid = tsk->pid;
 247	if (act_log_check(bt, what, sector, pid))
 248		return;
 249	cpu = raw_smp_processor_id();
 250
 251	if (blk_tracer) {
 252		tracing_record_cmdline(current);
 253
 254		buffer = blk_tr->array_buffer.buffer;
 255		pc = preempt_count();
 256		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 257						  sizeof(*t) + pdu_len + cgid_len,
 258						  0, pc);
 259		if (!event)
 260			return;
 261		t = ring_buffer_event_data(event);
 262		goto record_it;
 263	}
 264
 265	if (unlikely(tsk->btrace_seq != blktrace_seq))
 266		trace_note_tsk(tsk);
 267
 268	/*
 269	 * A word about the locking here - we disable interrupts to reserve
 270	 * some space in the relay per-cpu buffer, to prevent an irq
 271	 * from coming in and stepping on our toes.
 272	 */
 273	local_irq_save(flags);
 274	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
 275	if (t) {
 276		sequence = per_cpu_ptr(bt->sequence, cpu);
 277
 278		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
 279		t->sequence = ++(*sequence);
 280		t->time = ktime_to_ns(ktime_get());
 281record_it:
 282		/*
 283		 * These two are not needed in ftrace as they are in the
 284		 * generic trace_entry, filled by tracing_generic_entry_update,
 285		 * but for the trace_event->bin() synthesizer benefit we do it
 286		 * here too.
 287		 */
 288		t->cpu = cpu;
 289		t->pid = pid;
 290
 291		t->sector = sector;
 292		t->bytes = bytes;
 293		t->action = what;
 294		t->device = bt->dev;
 295		t->error = error;
 296		t->pdu_len = pdu_len + cgid_len;
 297
 298		if (cgid_len)
 299			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 300		if (pdu_len)
 301			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 302
 303		if (blk_tracer) {
 304			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 305			return;
 306		}
 307	}
 308
 309	local_irq_restore(flags);
 310}
 311
 312static void blk_trace_free(struct blk_trace *bt)
 313{
 314	debugfs_remove(bt->msg_file);
 315	debugfs_remove(bt->dropped_file);
 316	relay_close(bt->rchan);
 317	debugfs_remove(bt->dir);
 318	free_percpu(bt->sequence);
 319	free_percpu(bt->msg_data);
 320	kfree(bt);
 321}
 322
 323static void get_probe_ref(void)
 324{
 325	mutex_lock(&blk_probe_mutex);
 326	if (++blk_probes_ref == 1)
 327		blk_register_tracepoints();
 328	mutex_unlock(&blk_probe_mutex);
 329}
 330
 331static void put_probe_ref(void)
 332{
 333	mutex_lock(&blk_probe_mutex);
 334	if (!--blk_probes_ref)
 335		blk_unregister_tracepoints();
 336	mutex_unlock(&blk_probe_mutex);
 337}
 338
 339static void blk_trace_cleanup(struct blk_trace *bt)
 340{
 341	synchronize_rcu();
 342	blk_trace_free(bt);
 343	put_probe_ref();
 344}
 345
 346static int __blk_trace_remove(struct request_queue *q)
 347{
 348	struct blk_trace *bt;
 349
 350	bt = rcu_replace_pointer(q->blk_trace, NULL,
 351				 lockdep_is_held(&q->debugfs_mutex));
 352	if (!bt)
 353		return -EINVAL;
 354
 355	if (bt->trace_state != Blktrace_running)
 356		blk_trace_cleanup(bt);
 357
 358	return 0;
 359}
 360
 361int blk_trace_remove(struct request_queue *q)
 362{
 363	int ret;
 364
 365	mutex_lock(&q->debugfs_mutex);
 366	ret = __blk_trace_remove(q);
 367	mutex_unlock(&q->debugfs_mutex);
 368
 369	return ret;
 370}
 371EXPORT_SYMBOL_GPL(blk_trace_remove);
 372
 373static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 374				size_t count, loff_t *ppos)
 375{
 376	struct blk_trace *bt = filp->private_data;
 377	char buf[16];
 378
 379	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
 380
 381	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 382}
 383
 384static const struct file_operations blk_dropped_fops = {
 385	.owner =	THIS_MODULE,
 386	.open =		simple_open,
 387	.read =		blk_dropped_read,
 388	.llseek =	default_llseek,
 389};
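/*
 * Userspace sketch (illustrative; the path depends on where debugfs is
 * mounted and on the trace name): polling the counter served by
 * blk_dropped_read() above. Error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/block/sda/dropped", O_RDONLY);
 *	char buf[16];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *	buf[n > 0 ? n : 0] = '\0';
 *	printf("events lost to full subbuffers: %s", buf);
 *	close(fd);
 */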
 390
 391static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 392				size_t count, loff_t *ppos)
 393{
 394	char *msg;
 395	struct blk_trace *bt;
 396
 397	if (count >= BLK_TN_MAX_MSG)
 398		return -EINVAL;
 399
 400	msg = memdup_user_nul(buffer, count);
 401	if (IS_ERR(msg))
 402		return PTR_ERR(msg);
 403
 404	bt = filp->private_data;
 405	__trace_note_message(bt, NULL, "%s", msg);
 406	kfree(msg);
 407
 408	return count;
 409}
 410
 411static const struct file_operations blk_msg_fops = {
 412	.owner =	THIS_MODULE,
 413	.open =		simple_open,
 414	.write =	blk_msg_write,
 415	.llseek =	noop_llseek,
 416};
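/*
 * Userspace sketch (illustrative): dropping a marker into a running trace
 * via the "msg" file served by blk_msg_write(); blkparse renders it as a
 * notify message. Path and header assumptions as in the sketch above.
 *
 *	int fd = open("/sys/kernel/debug/block/sda/msg", O_WRONLY);
 *	(void)write(fd, "benchmark phase 2", 17);
 *	close(fd);
 */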
 417
 418/*
 419 * Keep track of how many times we encountered a full subbuffer, to aid
 420 * the user space app in telling how many lost events there were.
 421 */
 422static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 423				     void *prev_subbuf, size_t prev_padding)
 424{
 425	struct blk_trace *bt;
 426
 427	if (!relay_buf_full(buf))
 428		return 1;
 429
 430	bt = buf->chan->private_data;
 431	atomic_inc(&bt->dropped);
 432	return 0;
 433}
 434
 435static int blk_remove_buf_file_callback(struct dentry *dentry)
 436{
 437	debugfs_remove(dentry);
 438
 439	return 0;
 440}
 441
 442static struct dentry *blk_create_buf_file_callback(const char *filename,
 443						   struct dentry *parent,
 444						   umode_t mode,
 445						   struct rchan_buf *buf,
 446						   int *is_global)
 447{
 448	return debugfs_create_file(filename, mode, parent, buf,
 449					&relay_file_operations);
 450}
 451
 452static struct rchan_callbacks blk_relay_callbacks = {
 453	.subbuf_start		= blk_subbuf_start_callback,
 454	.create_buf_file	= blk_create_buf_file_callback,
 455	.remove_buf_file	= blk_remove_buf_file_callback,
 456};
 457
 458static void blk_trace_setup_lba(struct blk_trace *bt,
 459				struct block_device *bdev)
 460{
 461	struct hd_struct *part = NULL;
 462
 463	if (bdev)
 464		part = bdev->bd_part;
 465
 466	if (part) {
 467		bt->start_lba = part->start_sect;
 468		bt->end_lba = part->start_sect + part->nr_sects;
 469	} else {
 470		bt->start_lba = 0;
 471		bt->end_lba = -1ULL;
 472	}
 473}
 474
 475/*
 476 * Setup everything required to start tracing
 477 */
 478static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 479			      struct block_device *bdev,
 480			      struct blk_user_trace_setup *buts)
 481{
 482	struct blk_trace *bt = NULL;
 483	struct dentry *dir = NULL;
 484	int ret;
 485
 486	lockdep_assert_held(&q->debugfs_mutex);
 487
 488	if (!buts->buf_size || !buts->buf_nr)
 489		return -EINVAL;
 490
 491	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
 492	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
 493
 494	/*
 495	 * some device names have larger paths - convert the slashes
 496	 * to underscores for this to work as expected
 497	 */
 498	strreplace(buts->name, '/', '_');
 499
 500	/*
  501	 * bdev can be NULL, as with scsi-generic; this is as helpful as
  502	 * we can be.
 503	 */
 504	if (rcu_dereference_protected(q->blk_trace,
 505				      lockdep_is_held(&q->debugfs_mutex))) {
 506		pr_warn("Concurrent blktraces are not allowed on %s\n",
 507			buts->name);
 508		return -EBUSY;
 509	}
 510
 511	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
 512	if (!bt)
 513		return -ENOMEM;
 514
 515	ret = -ENOMEM;
 516	bt->sequence = alloc_percpu(unsigned long);
 517	if (!bt->sequence)
 518		goto err;
 519
 520	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 521	if (!bt->msg_data)
 522		goto err;
 523
 524	/*
  525	 * When tracing the whole disk, reuse the existing debugfs directory
  526	 * created by the block layer on init. For partition block devices
  527	 * and scsi-generic block devices, we create a temporary debugfs
  528	 * directory that is removed once the trace ends.
 529	 */
 530	if (bdev && bdev == bdev->bd_contains)
 531		dir = q->debugfs_dir;
 532	else
 533		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
 534
 535	/*
 536	 * As blktrace relies on debugfs for its interface the debugfs directory
 537	 * is required, contrary to the usual mantra of not checking for debugfs
 538	 * files or directories.
 539	 */
 540	if (IS_ERR_OR_NULL(dir)) {
 541		pr_warn("debugfs_dir not present for %s so skipping\n",
 542			buts->name);
 543		ret = -ENOENT;
 544		goto err;
 545	}
 546
 547	bt->dev = dev;
 548	atomic_set(&bt->dropped, 0);
 549	INIT_LIST_HEAD(&bt->running_list);
 550
 551	ret = -EIO;
 552	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 553					       &blk_dropped_fops);
 554
 555	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 556
 557	bt->rchan = relay_open("trace", dir, buts->buf_size,
 558				buts->buf_nr, &blk_relay_callbacks, bt);
 559	if (!bt->rchan)
 560		goto err;
 561
 562	bt->act_mask = buts->act_mask;
 563	if (!bt->act_mask)
 564		bt->act_mask = (u16) -1;
 565
 566	blk_trace_setup_lba(bt, bdev);
 567
 568	/* overwrite with user settings */
 569	if (buts->start_lba)
 570		bt->start_lba = buts->start_lba;
 571	if (buts->end_lba)
 572		bt->end_lba = buts->end_lba;
 573
 574	bt->pid = buts->pid;
 575	bt->trace_state = Blktrace_setup;
 576
 577	rcu_assign_pointer(q->blk_trace, bt);
 578	get_probe_ref();
 579
 580	ret = 0;
 581err:
 582	if (ret)
 583		blk_trace_free(bt);
 584	return ret;
 585}
 586
 587static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 588			     struct block_device *bdev, char __user *arg)
 589{
 590	struct blk_user_trace_setup buts;
 591	int ret;
 592
 593	ret = copy_from_user(&buts, arg, sizeof(buts));
 594	if (ret)
 595		return -EFAULT;
 596
 597	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 598	if (ret)
 599		return ret;
 600
 601	if (copy_to_user(arg, &buts, sizeof(buts))) {
 602		__blk_trace_remove(q);
 603		return -EFAULT;
 604	}
 605	return 0;
 606}
 607
 608int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 609		    struct block_device *bdev,
 610		    char __user *arg)
 611{
 612	int ret;
 613
 614	mutex_lock(&q->debugfs_mutex);
 615	ret = __blk_trace_setup(q, name, dev, bdev, arg);
 616	mutex_unlock(&q->debugfs_mutex);
 617
 618	return ret;
 619}
 620EXPORT_SYMBOL_GPL(blk_trace_setup);
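/*
 * Userspace sketch (illustrative): the ioctl sequence that blktrace(8)
 * drives through the function above. struct blk_user_trace_setup and the
 * BLKTRACE* ioctl numbers come from the UAPI headers; error handling is
 * elided.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/blktrace_api.h>
 *
 *	struct blk_user_trace_setup buts;
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	memset(&buts, 0, sizeof(buts));
 *	buts.buf_size = 512 * 1024;	// bytes per relay subbuffer
 *	buts.buf_nr = 4;		// subbuffers per CPU
 *	buts.act_mask = 0;		// 0 is widened to "all" above
 *	ioctl(fd, BLKTRACESETUP, &buts);	// kernel fills in buts.name
 *	ioctl(fd, BLKTRACESTART, 0);
 *	// consume /sys/kernel/debug/block/<buts.name>/trace<cpu> here
 *	ioctl(fd, BLKTRACESTOP, 0);
 *	ioctl(fd, BLKTRACETEARDOWN, 0);
 */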
 621
 622#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 623static int compat_blk_trace_setup(struct request_queue *q, char *name,
 624				  dev_t dev, struct block_device *bdev,
 625				  char __user *arg)
 626{
 627	struct blk_user_trace_setup buts;
 628	struct compat_blk_user_trace_setup cbuts;
 629	int ret;
 630
 631	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 632		return -EFAULT;
 633
 634	buts = (struct blk_user_trace_setup) {
 635		.act_mask = cbuts.act_mask,
 636		.buf_size = cbuts.buf_size,
 637		.buf_nr = cbuts.buf_nr,
 638		.start_lba = cbuts.start_lba,
 639		.end_lba = cbuts.end_lba,
 640		.pid = cbuts.pid,
 641	};
 642
 643	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 644	if (ret)
 645		return ret;
 646
 647	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 648		__blk_trace_remove(q);
 649		return -EFAULT;
 650	}
 651
 652	return 0;
 653}
 654#endif
 655
 656static int __blk_trace_startstop(struct request_queue *q, int start)
 657{
 658	int ret;
 659	struct blk_trace *bt;
 660
 661	bt = rcu_dereference_protected(q->blk_trace,
 662				       lockdep_is_held(&q->debugfs_mutex));
 663	if (bt == NULL)
 664		return -EINVAL;
 665
 666	/*
 667	 * For starting a trace, we can transition from a setup or stopped
  668	 * trace. For stopping a trace, the state must be running.
 669	 */
 670	ret = -EINVAL;
 671	if (start) {
 672		if (bt->trace_state == Blktrace_setup ||
 673		    bt->trace_state == Blktrace_stopped) {
 674			blktrace_seq++;
 675			smp_mb();
 676			bt->trace_state = Blktrace_running;
 677			spin_lock_irq(&running_trace_lock);
 678			list_add(&bt->running_list, &running_trace_list);
 679			spin_unlock_irq(&running_trace_lock);
 680
 681			trace_note_time(bt);
 682			ret = 0;
 683		}
 684	} else {
 685		if (bt->trace_state == Blktrace_running) {
 686			bt->trace_state = Blktrace_stopped;
 687			spin_lock_irq(&running_trace_lock);
 688			list_del_init(&bt->running_list);
 689			spin_unlock_irq(&running_trace_lock);
 690			relay_flush(bt->rchan);
 691			ret = 0;
 692		}
 693	}
 694
 695	return ret;
 696}
 697
 698int blk_trace_startstop(struct request_queue *q, int start)
 699{
 700	int ret;
 701
 702	mutex_lock(&q->debugfs_mutex);
 703	ret = __blk_trace_startstop(q, start);
 704	mutex_unlock(&q->debugfs_mutex);
 705
 706	return ret;
 707}
 708EXPORT_SYMBOL_GPL(blk_trace_startstop);
 709
 710/*
 711 * When reading or writing the blktrace sysfs files, the references to the
 712 * opened sysfs or device files should prevent the underlying block device
 713 * from being removed. So no further delete protection is really needed.
 714 */
 715
 716/**
 717 * blk_trace_ioctl: - handle the ioctls associated with tracing
 718 * @bdev:	the block device
 719 * @cmd:	the ioctl cmd
 720 * @arg:	the argument data, if any
 721 *
 722 **/
 723int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 724{
 725	struct request_queue *q;
 726	int ret, start = 0;
 727	char b[BDEVNAME_SIZE];
 728
 729	q = bdev_get_queue(bdev);
 730	if (!q)
 731		return -ENXIO;
 732
 733	mutex_lock(&q->debugfs_mutex);
 734
 735	switch (cmd) {
 736	case BLKTRACESETUP:
 737		bdevname(bdev, b);
 738		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 739		break;
 740#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 741	case BLKTRACESETUP32:
 742		bdevname(bdev, b);
 743		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 744		break;
 745#endif
 746	case BLKTRACESTART:
 747		start = 1;
 748		fallthrough;
 749	case BLKTRACESTOP:
 750		ret = __blk_trace_startstop(q, start);
 751		break;
 752	case BLKTRACETEARDOWN:
 753		ret = __blk_trace_remove(q);
 754		break;
 755	default:
 756		ret = -ENOTTY;
 757		break;
 758	}
 759
 760	mutex_unlock(&q->debugfs_mutex);
 761	return ret;
 762}
 763
 764/**
 765 * blk_trace_shutdown: - stop and cleanup trace structures
 766 * @q:    the request queue associated with the device
 767 *
 768 **/
 769void blk_trace_shutdown(struct request_queue *q)
 770{
 771	mutex_lock(&q->debugfs_mutex);
 772	if (rcu_dereference_protected(q->blk_trace,
 773				      lockdep_is_held(&q->debugfs_mutex))) {
 774		__blk_trace_startstop(q, 0);
 775		__blk_trace_remove(q);
 776	}
 777
 778	mutex_unlock(&q->debugfs_mutex);
 779}
 780
 781#ifdef CONFIG_BLK_CGROUP
 782static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 783{
 784	struct blk_trace *bt;
 785
 786	/* We don't use the 'bt' value here except as an optimization... */
 787	bt = rcu_dereference_protected(q->blk_trace, 1);
 788	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 789		return 0;
 790
 791	if (!bio->bi_blkg)
 792		return 0;
 793	return cgroup_id(bio_blkcg(bio)->css.cgroup);
 794}
 795#else
 796u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 797{
 798	return 0;
 799}
 800#endif
 801
 802static u64
 803blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
 804{
 805	if (!rq->bio)
 806		return 0;
 807	/* Use the first bio */
 808	return blk_trace_bio_get_cgid(q, rq->bio);
 809}
 810
 811/*
 812 * blktrace probes
 813 */
 814
 815/**
 816 * blk_add_trace_rq - Add a trace for a request oriented action
 817 * @rq:		the source request
 818 * @error:	return status to log
 819 * @nr_bytes:	number of completed bytes
 820 * @what:	the action
 821 * @cgid:	the cgroup info
 822 *
 823 * Description:
 824 *     Records an action against a request. Will log the bio offset + size.
 825 *
 826 **/
 827static void blk_add_trace_rq(struct request *rq, int error,
 828			     unsigned int nr_bytes, u32 what, u64 cgid)
 829{
 830	struct blk_trace *bt;
 831
 832	rcu_read_lock();
 833	bt = rcu_dereference(rq->q->blk_trace);
 834	if (likely(!bt)) {
 835		rcu_read_unlock();
 836		return;
 837	}
 838
 839	if (blk_rq_is_passthrough(rq))
 840		what |= BLK_TC_ACT(BLK_TC_PC);
 841	else
 842		what |= BLK_TC_ACT(BLK_TC_FS);
 843
 844	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
 845			rq->cmd_flags, what, error, 0, NULL, cgid);
 846	rcu_read_unlock();
 847}
 848
 849static void blk_add_trace_rq_insert(void *ignore,
 850				    struct request_queue *q, struct request *rq)
 851{
 852	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
 853			 blk_trace_request_get_cgid(q, rq));
 854}
 855
 856static void blk_add_trace_rq_issue(void *ignore,
 857				   struct request_queue *q, struct request *rq)
 858{
 859	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
 860			 blk_trace_request_get_cgid(q, rq));
 861}
 862
 863static void blk_add_trace_rq_merge(void *ignore,
 864				   struct request_queue *q, struct request *rq)
 865{
 866	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
 867			 blk_trace_request_get_cgid(q, rq));
 868}
 869
 870static void blk_add_trace_rq_requeue(void *ignore,
 871				     struct request_queue *q,
 872				     struct request *rq)
 873{
 874	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
 875			 blk_trace_request_get_cgid(q, rq));
 876}
 877
 878static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 879			int error, unsigned int nr_bytes)
 880{
 881	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
 882			 blk_trace_request_get_cgid(rq->q, rq));
 883}
 884
 885/**
 886 * blk_add_trace_bio - Add a trace for a bio oriented action
 887 * @q:		queue the io is for
 888 * @bio:	the source bio
 889 * @what:	the action
 890 * @error:	error, if any
 891 *
 892 * Description:
 893 *     Records an action against a bio. Will log the bio offset + size.
 894 *
 895 **/
 896static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 897			      u32 what, int error)
 898{
 899	struct blk_trace *bt;
 900
 901	rcu_read_lock();
 902	bt = rcu_dereference(q->blk_trace);
 903	if (likely(!bt)) {
 904		rcu_read_unlock();
 905		return;
 906	}
 907
 908	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 909			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
 910			blk_trace_bio_get_cgid(q, bio));
 911	rcu_read_unlock();
 912}
 913
 914static void blk_add_trace_bio_bounce(void *ignore,
 915				     struct request_queue *q, struct bio *bio)
 916{
 917	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 918}
 919
 920static void blk_add_trace_bio_complete(void *ignore,
 921				       struct request_queue *q, struct bio *bio)
 922{
 923	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
 924			  blk_status_to_errno(bio->bi_status));
 925}
 926
 927static void blk_add_trace_bio_backmerge(void *ignore,
 928					struct request_queue *q,
 929					struct request *rq,
 930					struct bio *bio)
 931{
 932	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 933}
 934
 935static void blk_add_trace_bio_frontmerge(void *ignore,
 936					 struct request_queue *q,
 937					 struct request *rq,
 938					 struct bio *bio)
 939{
 940	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 941}
 942
 943static void blk_add_trace_bio_queue(void *ignore,
 944				    struct request_queue *q, struct bio *bio)
 945{
 946	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 947}
 948
 949static void blk_add_trace_getrq(void *ignore,
 950				struct request_queue *q,
 951				struct bio *bio, int rw)
 952{
 953	if (bio)
 954		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 955	else {
 956		struct blk_trace *bt;
 957
 958		rcu_read_lock();
 959		bt = rcu_dereference(q->blk_trace);
 960		if (bt)
 961			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
 962					NULL, 0);
 963		rcu_read_unlock();
 964	}
 965}
 966
 967
 968static void blk_add_trace_sleeprq(void *ignore,
 969				  struct request_queue *q,
 970				  struct bio *bio, int rw)
 971{
 972	if (bio)
 973		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 974	else {
 975		struct blk_trace *bt;
 976
 977		rcu_read_lock();
 978		bt = rcu_dereference(q->blk_trace);
 979		if (bt)
 980			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
 981					0, 0, NULL, 0);
 982		rcu_read_unlock();
 983	}
 984}
 985
 986static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 987{
 988	struct blk_trace *bt;
 989
 990	rcu_read_lock();
 991	bt = rcu_dereference(q->blk_trace);
 992	if (bt)
 993		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 994	rcu_read_unlock();
 995}
 996
 997static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 998				    unsigned int depth, bool explicit)
 999{
1000	struct blk_trace *bt;
1001
1002	rcu_read_lock();
1003	bt = rcu_dereference(q->blk_trace);
1004	if (bt) {
1005		__be64 rpdu = cpu_to_be64(depth);
1006		u32 what;
1007
1008		if (explicit)
1009			what = BLK_TA_UNPLUG_IO;
1010		else
1011			what = BLK_TA_UNPLUG_TIMER;
1012
1013		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
1014	}
1015	rcu_read_unlock();
1016}
1017
1018static void blk_add_trace_split(void *ignore,
1019				struct request_queue *q, struct bio *bio,
1020				unsigned int pdu)
1021{
1022	struct blk_trace *bt;
1023
1024	rcu_read_lock();
1025	bt = rcu_dereference(q->blk_trace);
1026	if (bt) {
1027		__be64 rpdu = cpu_to_be64(pdu);
1028
1029		__blk_add_trace(bt, bio->bi_iter.bi_sector,
1030				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
1031				BLK_TA_SPLIT,
1032				blk_status_to_errno(bio->bi_status),
1033				sizeof(rpdu), &rpdu,
1034				blk_trace_bio_get_cgid(q, bio));
1035	}
1036	rcu_read_unlock();
1037}
1038
1039/**
1040 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
1041 * @ignore:	trace callback data parameter (not used)
1042 * @q:		queue the io is for
1043 * @bio:	the source bio
1044 * @dev:	target device
1045 * @from:	source sector
1046 *
1047 * Description:
1048 *     Device mapper or raid targets sometimes need to split a bio because
1049 *     it spans a stripe (or similar). Add a trace for that action.
1050 *
1051 **/
1052static void blk_add_trace_bio_remap(void *ignore,
1053				    struct request_queue *q, struct bio *bio,
1054				    dev_t dev, sector_t from)
1055{
1056	struct blk_trace *bt;
1057	struct blk_io_trace_remap r;
1058
1059	rcu_read_lock();
1060	bt = rcu_dereference(q->blk_trace);
1061	if (likely(!bt)) {
1062		rcu_read_unlock();
1063		return;
1064	}
1065
1066	r.device_from = cpu_to_be32(dev);
1067	r.device_to   = cpu_to_be32(bio_dev(bio));
1068	r.sector_from = cpu_to_be64(from);
1069
1070	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1071			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
1072			blk_status_to_errno(bio->bi_status),
1073			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1074	rcu_read_unlock();
1075}
1076
1077/**
1078 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1079 * @ignore:	trace callback data parameter (not used)
1080 * @q:		queue the io is for
1081 * @rq:		the source request
1082 * @dev:	target device
1083 * @from:	source sector
1084 *
1085 * Description:
1086 *     Device mapper remaps request to other devices.
1087 *     Add a trace for that action.
1088 *
1089 **/
1090static void blk_add_trace_rq_remap(void *ignore,
1091				   struct request_queue *q,
1092				   struct request *rq, dev_t dev,
1093				   sector_t from)
1094{
1095	struct blk_trace *bt;
1096	struct blk_io_trace_remap r;
1097
1098	rcu_read_lock();
1099	bt = rcu_dereference(q->blk_trace);
1100	if (likely(!bt)) {
1101		rcu_read_unlock();
1102		return;
1103	}
1104
1105	r.device_from = cpu_to_be32(dev);
1106	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
1107	r.sector_from = cpu_to_be64(from);
1108
1109	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1110			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
1111			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
1112	rcu_read_unlock();
1113}
1114
1115/**
1116 * blk_add_driver_data - Add binary message with driver-specific data
1117 * @q:		queue the io is for
1118 * @rq:		io request
1119 * @data:	driver-specific data
1120 * @len:	length of driver-specific data
1121 *
1122 * Description:
1123 *     Some drivers might want to write driver-specific data per request.
1124 *
1125 **/
1126void blk_add_driver_data(struct request_queue *q,
1127			 struct request *rq,
1128			 void *data, size_t len)
1129{
1130	struct blk_trace *bt;
1131
1132	rcu_read_lock();
1133	bt = rcu_dereference(q->blk_trace);
1134	if (likely(!bt)) {
1135		rcu_read_unlock();
1136		return;
1137	}
1138
1139	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
1140				BLK_TA_DRV_DATA, 0, len, data,
1141				blk_trace_request_get_cgid(q, rq));
1142	rcu_read_unlock();
1143}
1144EXPORT_SYMBOL_GPL(blk_add_driver_data);
1145
1146static void blk_register_tracepoints(void)
1147{
1148	int ret;
1149
1150	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1151	WARN_ON(ret);
1152	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1153	WARN_ON(ret);
1154	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1155	WARN_ON(ret);
1156	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1157	WARN_ON(ret);
1158	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1159	WARN_ON(ret);
1160	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1161	WARN_ON(ret);
1162	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1163	WARN_ON(ret);
1164	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1165	WARN_ON(ret);
1166	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1167	WARN_ON(ret);
1168	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1169	WARN_ON(ret);
1170	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1171	WARN_ON(ret);
1172	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1173	WARN_ON(ret);
1174	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1175	WARN_ON(ret);
1176	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1177	WARN_ON(ret);
1178	ret = register_trace_block_split(blk_add_trace_split, NULL);
1179	WARN_ON(ret);
1180	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1181	WARN_ON(ret);
1182	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1183	WARN_ON(ret);
1184}
1185
1186static void blk_unregister_tracepoints(void)
1187{
1188	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1189	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1190	unregister_trace_block_split(blk_add_trace_split, NULL);
1191	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1192	unregister_trace_block_plug(blk_add_trace_plug, NULL);
1193	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1194	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1195	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1196	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1197	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1198	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1199	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1200	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1201	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1202	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1203	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1204	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1205
1206	tracepoint_synchronize_unregister();
1207}
1208
1209/*
1210 * struct blk_io_tracer formatting routines
1211 */
1212
1213static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1214{
1215	int i = 0;
1216	int tc = t->action >> BLK_TC_SHIFT;
1217
1218	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1219		rwbs[i++] = 'N';
1220		goto out;
1221	}
1222
1223	if (tc & BLK_TC_FLUSH)
1224		rwbs[i++] = 'F';
1225
1226	if (tc & BLK_TC_DISCARD)
1227		rwbs[i++] = 'D';
1228	else if (tc & BLK_TC_WRITE)
1229		rwbs[i++] = 'W';
1230	else if (t->bytes)
1231		rwbs[i++] = 'R';
1232	else
1233		rwbs[i++] = 'N';
1234
1235	if (tc & BLK_TC_FUA)
1236		rwbs[i++] = 'F';
1237	if (tc & BLK_TC_AHEAD)
1238		rwbs[i++] = 'A';
1239	if (tc & BLK_TC_SYNC)
1240		rwbs[i++] = 'S';
1241	if (tc & BLK_TC_META)
1242		rwbs[i++] = 'M';
1243out:
1244	rwbs[i] = '\0';
1245}
1246
1247static inline
1248const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1249{
1250	return (const struct blk_io_trace *)ent;
1251}
1252
1253static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1254{
1255	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
1256}
1257
1258static inline u64 t_cgid(const struct trace_entry *ent)
1259{
1260	return *(u64 *)(te_blk_io_trace(ent) + 1);
1261}
1262
1263static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1264{
1265	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
1266}
1267
1268static inline u32 t_action(const struct trace_entry *ent)
1269{
1270	return te_blk_io_trace(ent)->action;
1271}
1272
1273static inline u32 t_bytes(const struct trace_entry *ent)
1274{
1275	return te_blk_io_trace(ent)->bytes;
1276}
1277
1278static inline u32 t_sec(const struct trace_entry *ent)
1279{
1280	return te_blk_io_trace(ent)->bytes >> 9;
1281}
1282
1283static inline unsigned long long t_sector(const struct trace_entry *ent)
1284{
1285	return te_blk_io_trace(ent)->sector;
1286}
1287
1288static inline __u16 t_error(const struct trace_entry *ent)
1289{
1290	return te_blk_io_trace(ent)->error;
1291}
1292
1293static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1294{
1295	const __be64 *val = pdu_start(ent, has_cg);
1296	return be64_to_cpu(*val);
1297}
1298
1299typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1300	bool has_cg);
1301
1302static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1303	bool has_cg)
1304{
1305	char rwbs[RWBS_LEN];
1306	unsigned long long ts  = iter->ts;
1307	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1308	unsigned secs	       = (unsigned long)ts;
1309	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1310
1311	fill_rwbs(rwbs, t);
1312
1313	trace_seq_printf(&iter->seq,
1314			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1315			 MAJOR(t->device), MINOR(t->device), iter->cpu,
1316			 secs, nsec_rem, iter->ent->pid, act, rwbs);
1317}
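/*
 * Worked example (illustrative, padding approximate): for device 8,0 on
 * CPU 1 at 5.000123456s, pid 4711, act "Q" and rwbs "WS", the format above
 * produces the classic blkparse-style prefix
 *
 *	  8,0    1     5.000123456  4711  Q  WS
 *
 * after which the per-action handler appends sector, length and the
 * process name.
 */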
1318
1319static void blk_log_action(struct trace_iterator *iter, const char *act,
1320	bool has_cg)
1321{
1322	char rwbs[RWBS_LEN];
1323	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1324
1325	fill_rwbs(rwbs, t);
1326	if (has_cg) {
1327		u64 id = t_cgid(iter->ent);
1328
1329		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1330			char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1331
1332			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1333				sizeof(blkcg_name_buf));
1334			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1335				 MAJOR(t->device), MINOR(t->device),
1336				 blkcg_name_buf, act, rwbs);
1337		} else {
1338			/*
1339			 * The cgid portion used to be "INO,GEN".  Userland
1340			 * builds a FILEID_INO32_GEN fid out of them and
1341			 * opens the cgroup using open_by_handle_at(2).
1342			 * While 32bit ino setups are still the same, 64bit
1343			 * ones now use the 64bit ino as the whole ID and
1344			 * no longer use generation.
1345			 *
1346			 * Regardless of the content, always output
1347			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
1348			 * be mapped back to @id on both 64 and 32bit ino
1349			 * setups.  See __kernfs_fh_to_dentry().
1350			 */
1351			trace_seq_printf(&iter->seq,
1352				 "%3d,%-3d %llx,%-llx %2s %3s ",
1353				 MAJOR(t->device), MINOR(t->device),
1354				 id & U32_MAX, id >> 32, act, rwbs);
1355		}
1356	} else
1357		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1358				 MAJOR(t->device), MINOR(t->device), act, rwbs);
1359}
1360
1361static void blk_log_dump_pdu(struct trace_seq *s,
1362	const struct trace_entry *ent, bool has_cg)
1363{
1364	const unsigned char *pdu_buf;
1365	int pdu_len;
1366	int i, end;
1367
1368	pdu_buf = pdu_start(ent, has_cg);
1369	pdu_len = pdu_real_len(ent, has_cg);
1370
1371	if (!pdu_len)
1372		return;
1373
1374	/* find the last zero that needs to be printed */
1375	for (end = pdu_len - 1; end >= 0; end--)
1376		if (pdu_buf[end])
1377			break;
1378	end++;
1379
1380	trace_seq_putc(s, '(');
1381
1382	for (i = 0; i < pdu_len; i++) {
1383
1384		trace_seq_printf(s, "%s%02x",
1385				 i == 0 ? "" : " ", pdu_buf[i]);
1386
1387		/*
1388		 * stop when the rest is just zeroes and indicate so
1389		 * with a ".." appended
1390		 */
1391		if (i == end && end != pdu_len - 1) {
1392			trace_seq_puts(s, " ..) ");
1393			return;
1394		}
1395	}
1396
1397	trace_seq_puts(s, ") ");
1398}
1399
1400static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1401{
1402	char cmd[TASK_COMM_LEN];
1403
1404	trace_find_cmdline(ent->pid, cmd);
1405
1406	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1407		trace_seq_printf(s, "%u ", t_bytes(ent));
1408		blk_log_dump_pdu(s, ent, has_cg);
1409		trace_seq_printf(s, "[%s]\n", cmd);
1410	} else {
1411		if (t_sec(ent))
1412			trace_seq_printf(s, "%llu + %u [%s]\n",
1413						t_sector(ent), t_sec(ent), cmd);
1414		else
1415			trace_seq_printf(s, "[%s]\n", cmd);
1416	}
1417}
1418
1419static void blk_log_with_error(struct trace_seq *s,
1420			      const struct trace_entry *ent, bool has_cg)
1421{
1422	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1423		blk_log_dump_pdu(s, ent, has_cg);
1424		trace_seq_printf(s, "[%d]\n", t_error(ent));
1425	} else {
1426		if (t_sec(ent))
1427			trace_seq_printf(s, "%llu + %u [%d]\n",
1428					 t_sector(ent),
1429					 t_sec(ent), t_error(ent));
1430		else
1431			trace_seq_printf(s, "%llu [%d]\n",
1432					 t_sector(ent), t_error(ent));
1433	}
1434}
1435
1436static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1437{
1438	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1439
1440	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1441			 t_sector(ent), t_sec(ent),
1442			 MAJOR(be32_to_cpu(__r->device_from)),
1443			 MINOR(be32_to_cpu(__r->device_from)),
1444			 be64_to_cpu(__r->sector_from));
1445}
1446
1447static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1448{
1449	char cmd[TASK_COMM_LEN];
1450
1451	trace_find_cmdline(ent->pid, cmd);
1452
1453	trace_seq_printf(s, "[%s]\n", cmd);
1454}
1455
1456static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1457{
1458	char cmd[TASK_COMM_LEN];
1459
1460	trace_find_cmdline(ent->pid, cmd);
1461
1462	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1463}
1464
1465static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1466{
1467	char cmd[TASK_COMM_LEN];
1468
1469	trace_find_cmdline(ent->pid, cmd);
1470
1471	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1472			 get_pdu_int(ent, has_cg), cmd);
1473}
1474
1475static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1476			bool has_cg)
1477{
1478
1479	trace_seq_putmem(s, pdu_start(ent, has_cg),
1480		pdu_real_len(ent, has_cg));
1481	trace_seq_putc(s, '\n');
1482}
1483
1484/*
1485 * struct tracer operations
1486 */
1487
1488static void blk_tracer_print_header(struct seq_file *m)
1489{
1490	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1491		return;
1492	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1493		    "#  |     |     |           |   |   |\n");
1494}
1495
1496static void blk_tracer_start(struct trace_array *tr)
1497{
1498	blk_tracer_enabled = true;
1499}
1500
1501static int blk_tracer_init(struct trace_array *tr)
1502{
1503	blk_tr = tr;
1504	blk_tracer_start(tr);
1505	return 0;
1506}
1507
1508static void blk_tracer_stop(struct trace_array *tr)
1509{
1510	blk_tracer_enabled = false;
1511}
1512
1513static void blk_tracer_reset(struct trace_array *tr)
1514{
1515	blk_tracer_stop(tr);
1516}
1517
1518static const struct {
1519	const char *act[2];
1520	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
1521			    bool has_cg);
1522} what2act[] = {
1523	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
1524	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
1525	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
1526	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
1527	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
1528	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
1529	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
1530	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
1531	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
1532	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
1533	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
1534	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
1535	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
1536	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
1537	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
1538};
1539
1540static enum print_line_t print_one_line(struct trace_iterator *iter,
1541					bool classic)
1542{
1543	struct trace_array *tr = iter->tr;
1544	struct trace_seq *s = &iter->seq;
1545	const struct blk_io_trace *t;
1546	u16 what;
1547	bool long_act;
1548	blk_log_action_t *log_action;
1549	bool has_cg;
1550
1551	t	   = te_blk_io_trace(iter->ent);
1552	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1553	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1554	log_action = classic ? &blk_log_action_classic : &blk_log_action;
1555	has_cg	   = t->action & __BLK_TA_CGROUP;
1556
1557	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1558		log_action(iter, long_act ? "message" : "m", has_cg);
1559		blk_log_msg(s, iter->ent, has_cg);
1560		return trace_handle_return(s);
1561	}
1562
1563	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1564		trace_seq_printf(s, "Unknown action %x\n", what);
1565	else {
1566		log_action(iter, what2act[what].act[long_act], has_cg);
1567		what2act[what].print(s, iter->ent, has_cg);
1568	}
1569
1570	return trace_handle_return(s);
1571}
1572
1573static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1574					       int flags, struct trace_event *event)
1575{
1576	return print_one_line(iter, false);
1577}
1578
1579static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1580{
1581	struct trace_seq *s = &iter->seq;
1582	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1583	const int offset = offsetof(struct blk_io_trace, sector);
1584	struct blk_io_trace old = {
1585		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1586		.time     = iter->ts,
1587	};
1588
1589	trace_seq_putmem(s, &old, offset);
1590	trace_seq_putmem(s, &t->sector,
1591			 sizeof(old) - offset + t->pdu_len);
1592}
1593
1594static enum print_line_t
1595blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1596			     struct trace_event *event)
1597{
1598	blk_trace_synthesize_old_trace(iter);
1599
1600	return trace_handle_return(&iter->seq);
1601}
1602
1603static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1604{
1605	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1606		return TRACE_TYPE_UNHANDLED;
1607
1608	return print_one_line(iter, true);
1609}
1610
1611static int
1612blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1613{
1614	/* don't output context-info for blk_classic output */
1615	if (bit == TRACE_BLK_OPT_CLASSIC) {
1616		if (set)
1617			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1618		else
1619			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1620	}
1621	return 0;
1622}
1623
1624static struct tracer blk_tracer __read_mostly = {
1625	.name		= "blk",
1626	.init		= blk_tracer_init,
1627	.reset		= blk_tracer_reset,
1628	.start		= blk_tracer_start,
1629	.stop		= blk_tracer_stop,
1630	.print_header	= blk_tracer_print_header,
1631	.print_line	= blk_tracer_print_line,
1632	.flags		= &blk_tracer_flags,
1633	.set_flag	= blk_tracer_set_flag,
1634};
1635
1636static struct trace_event_functions trace_blk_event_funcs = {
1637	.trace		= blk_trace_event_print,
1638	.binary		= blk_trace_event_print_binary,
1639};
1640
1641static struct trace_event trace_blk_event = {
1642	.type		= TRACE_BLK,
1643	.funcs		= &trace_blk_event_funcs,
1644};
1645
1646static int __init init_blk_tracer(void)
1647{
1648	if (!register_trace_event(&trace_blk_event)) {
1649		pr_warn("Warning: could not register block events\n");
1650		return 1;
1651	}
1652
1653	if (register_tracer(&blk_tracer) != 0) {
1654		pr_warn("Warning: could not register the block tracer\n");
1655		unregister_trace_event(&trace_blk_event);
1656		return 1;
1657	}
1658
1659	return 0;
1660}
1661
1662device_initcall(init_blk_tracer);
1663
1664static int blk_trace_remove_queue(struct request_queue *q)
1665{
1666	struct blk_trace *bt;
1667
1668	bt = rcu_replace_pointer(q->blk_trace, NULL,
1669				 lockdep_is_held(&q->debugfs_mutex));
1670	if (bt == NULL)
1671		return -EINVAL;
1672
1673	put_probe_ref();
1674	synchronize_rcu();
1675	blk_trace_free(bt);
1676	return 0;
1677}
1678
1679/*
1680 * Setup everything required to start tracing
1681 */
1682static int blk_trace_setup_queue(struct request_queue *q,
1683				 struct block_device *bdev)
1684{
1685	struct blk_trace *bt = NULL;
1686	int ret = -ENOMEM;
1687
1688	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1689	if (!bt)
1690		return -ENOMEM;
1691
1692	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1693	if (!bt->msg_data)
1694		goto free_bt;
1695
1696	bt->dev = bdev->bd_dev;
1697	bt->act_mask = (u16)-1;
1698
1699	blk_trace_setup_lba(bt, bdev);
1700
1701	rcu_assign_pointer(q->blk_trace, bt);
1702	get_probe_ref();
1703	return 0;
1704
1705free_bt:
1706	blk_trace_free(bt);
1707	return ret;
1708}
1709
1710/*
1711 * sysfs interface to enable and configure tracing
1712 */
1713
1714static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1715					 struct device_attribute *attr,
1716					 char *buf);
1717static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1718					  struct device_attribute *attr,
1719					  const char *buf, size_t count);
1720#define BLK_TRACE_DEVICE_ATTR(_name) \
1721	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1722		    sysfs_blk_trace_attr_show, \
1723		    sysfs_blk_trace_attr_store)
1724
1725static BLK_TRACE_DEVICE_ATTR(enable);
1726static BLK_TRACE_DEVICE_ATTR(act_mask);
1727static BLK_TRACE_DEVICE_ATTR(pid);
1728static BLK_TRACE_DEVICE_ATTR(start_lba);
1729static BLK_TRACE_DEVICE_ATTR(end_lba);
1730
1731static struct attribute *blk_trace_attrs[] = {
1732	&dev_attr_enable.attr,
1733	&dev_attr_act_mask.attr,
1734	&dev_attr_pid.attr,
1735	&dev_attr_start_lba.attr,
1736	&dev_attr_end_lba.attr,
1737	NULL
1738};
1739
1740struct attribute_group blk_trace_attr_group = {
1741	.name  = "trace",
1742	.attrs = blk_trace_attrs,
1743};
1744
1745static const struct {
1746	int mask;
1747	const char *str;
1748} mask_maps[] = {
1749	{ BLK_TC_READ,		"read"		},
1750	{ BLK_TC_WRITE,		"write"		},
1751	{ BLK_TC_FLUSH,		"flush"		},
1752	{ BLK_TC_SYNC,		"sync"		},
1753	{ BLK_TC_QUEUE,		"queue"		},
1754	{ BLK_TC_REQUEUE,	"requeue"	},
1755	{ BLK_TC_ISSUE,		"issue"		},
1756	{ BLK_TC_COMPLETE,	"complete"	},
1757	{ BLK_TC_FS,		"fs"		},
1758	{ BLK_TC_PC,		"pc"		},
1759	{ BLK_TC_NOTIFY,	"notify"	},
1760	{ BLK_TC_AHEAD,		"ahead"		},
1761	{ BLK_TC_META,		"meta"		},
1762	{ BLK_TC_DISCARD,	"discard"	},
1763	{ BLK_TC_DRV_DATA,	"drv_data"	},
1764	{ BLK_TC_FUA,		"fua"		},
1765};
1766
1767static int blk_trace_str2mask(const char *str)
1768{
1769	int i;
1770	int mask = 0;
1771	char *buf, *s, *token;
1772
1773	buf = kstrdup(str, GFP_KERNEL);
1774	if (buf == NULL)
1775		return -ENOMEM;
1776	s = strstrip(buf);
1777
1778	while (1) {
1779		token = strsep(&s, ",");
1780		if (token == NULL)
1781			break;
1782
1783		if (*token == '\0')
1784			continue;
1785
1786		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1787			if (strcasecmp(token, mask_maps[i].str) == 0) {
1788				mask |= mask_maps[i].mask;
1789				break;
1790			}
1791		}
1792		if (i == ARRAY_SIZE(mask_maps)) {
1793			mask = -EINVAL;
1794			break;
1795		}
1796	}
1797	kfree(buf);
1798
1799	return mask;
1800}
1801
1802static ssize_t blk_trace_mask2str(char *buf, int mask)
1803{
1804	int i;
1805	char *p = buf;
1806
1807	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1808		if (mask & mask_maps[i].mask) {
1809			p += sprintf(p, "%s%s",
1810				    (p == buf) ? "" : ",", mask_maps[i].str);
1811		}
1812	}
1813	*p++ = '\n';
1814
1815	return p - buf;
1816}
1817
1818static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1819{
1820	if (bdev->bd_disk == NULL)
1821		return NULL;
1822
1823	return bdev_get_queue(bdev);
1824}
1825
1826static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1827					 struct device_attribute *attr,
1828					 char *buf)
1829{
1830	struct hd_struct *p = dev_to_part(dev);
1831	struct request_queue *q;
1832	struct block_device *bdev;
1833	struct blk_trace *bt;
1834	ssize_t ret = -ENXIO;
1835
1836	bdev = bdget(part_devt(p));
1837	if (bdev == NULL)
1838		goto out;
1839
1840	q = blk_trace_get_queue(bdev);
1841	if (q == NULL)
1842		goto out_bdput;
1843
1844	mutex_lock(&q->debugfs_mutex);
1845
1846	bt = rcu_dereference_protected(q->blk_trace,
1847				       lockdep_is_held(&q->debugfs_mutex));
1848	if (attr == &dev_attr_enable) {
1849		ret = sprintf(buf, "%u\n", !!bt);
1850		goto out_unlock_bdev;
1851	}
1852
1853	if (bt == NULL)
1854		ret = sprintf(buf, "disabled\n");
1855	else if (attr == &dev_attr_act_mask)
1856		ret = blk_trace_mask2str(buf, bt->act_mask);
1857	else if (attr == &dev_attr_pid)
1858		ret = sprintf(buf, "%u\n", bt->pid);
1859	else if (attr == &dev_attr_start_lba)
1860		ret = sprintf(buf, "%llu\n", bt->start_lba);
1861	else if (attr == &dev_attr_end_lba)
1862		ret = sprintf(buf, "%llu\n", bt->end_lba);
1863
1864out_unlock_bdev:
1865	mutex_unlock(&q->debugfs_mutex);
1866out_bdput:
1867	bdput(bdev);
1868out:
1869	return ret;
1870}
1871
1872static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1873					  struct device_attribute *attr,
1874					  const char *buf, size_t count)
1875{
1876	struct block_device *bdev;
1877	struct request_queue *q;
1878	struct hd_struct *p;
1879	struct blk_trace *bt;
1880	u64 value;
1881	ssize_t ret = -EINVAL;
1882
1883	if (count == 0)
1884		goto out;
1885
1886	if (attr == &dev_attr_act_mask) {
1887		if (kstrtoull(buf, 0, &value)) {
1888			/* Assume it is a list of trace category names */
1889			ret = blk_trace_str2mask(buf);
1890			if (ret < 0)
1891				goto out;
1892			value = ret;
1893		}
1894	} else if (kstrtoull(buf, 0, &value))
1895		goto out;
1896
1897	ret = -ENXIO;
1898
1899	p = dev_to_part(dev);
1900	bdev = bdget(part_devt(p));
1901	if (bdev == NULL)
1902		goto out;
1903
1904	q = blk_trace_get_queue(bdev);
1905	if (q == NULL)
1906		goto out_bdput;
1907
1908	mutex_lock(&q->debugfs_mutex);
1909
1910	bt = rcu_dereference_protected(q->blk_trace,
1911				       lockdep_is_held(&q->debugfs_mutex));
1912	if (attr == &dev_attr_enable) {
1913		if (!!value == !!bt) {
1914			ret = 0;
1915			goto out_unlock_bdev;
1916		}
1917		if (value)
1918			ret = blk_trace_setup_queue(q, bdev);
1919		else
1920			ret = blk_trace_remove_queue(q);
1921		goto out_unlock_bdev;
1922	}
1923
1924	ret = 0;
1925	if (bt == NULL) {
1926		ret = blk_trace_setup_queue(q, bdev);
1927		bt = rcu_dereference_protected(q->blk_trace,
1928				lockdep_is_held(&q->debugfs_mutex));
1929	}
1930
1931	if (ret == 0) {
1932		if (attr == &dev_attr_act_mask)
1933			bt->act_mask = value;
1934		else if (attr == &dev_attr_pid)
1935			bt->pid = value;
1936		else if (attr == &dev_attr_start_lba)
1937			bt->start_lba = value;
1938		else if (attr == &dev_attr_end_lba)
1939			bt->end_lba = value;
1940	}
1941
1942out_unlock_bdev:
1943	mutex_unlock(&q->debugfs_mutex);
1944out_bdput:
1945	bdput(bdev);
1946out:
1947	return ret ? ret : count;
1948}
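/*
 * Userspace sketch (illustrative, disk name assumed; headers as in the
 * sketches above): the sysfs route to the same filter settings, typically
 * combined with the ftrace "blk" tracer rather than the ioctl interface.
 * Writing any attribute while tracing is disabled sets the queue up via
 * blk_trace_setup_queue().
 *
 *	int fd = open("/sys/block/sda/trace/act_mask", O_WRONLY);
 *	(void)write(fd, "read,write", 10);  // parsed by blk_trace_str2mask()
 *	close(fd);
 *
 *	fd = open("/sys/block/sda/trace/enable", O_WRONLY);
 *	(void)write(fd, "1", 1);
 *	close(fd);
 */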
1949
1950int blk_trace_init_sysfs(struct device *dev)
1951{
1952	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1953}
1954
1955void blk_trace_remove_sysfs(struct device *dev)
1956{
1957	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1958}
1959
1960#endif /* CONFIG_BLK_DEV_IO_TRACE */
1961
1962#ifdef CONFIG_EVENT_TRACING
1963
1964void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
1965{
1966	int i = 0;
1967
1968	if (op & REQ_PREFLUSH)
1969		rwbs[i++] = 'F';
1970
1971	switch (op & REQ_OP_MASK) {
1972	case REQ_OP_WRITE:
1973	case REQ_OP_WRITE_SAME:
1974		rwbs[i++] = 'W';
1975		break;
1976	case REQ_OP_DISCARD:
1977		rwbs[i++] = 'D';
1978		break;
1979	case REQ_OP_SECURE_ERASE:
1980		rwbs[i++] = 'D';
1981		rwbs[i++] = 'E';
1982		break;
1983	case REQ_OP_FLUSH:
1984		rwbs[i++] = 'F';
1985		break;
1986	case REQ_OP_READ:
1987		rwbs[i++] = 'R';
1988		break;
1989	default:
1990		rwbs[i++] = 'N';
1991	}
1992
1993	if (op & REQ_FUA)
1994		rwbs[i++] = 'F';
1995	if (op & REQ_RAHEAD)
1996		rwbs[i++] = 'A';
1997	if (op & REQ_SYNC)
1998		rwbs[i++] = 'S';
1999	if (op & REQ_META)
2000		rwbs[i++] = 'M';
2001
2002	rwbs[i] = '\0';
2003}
2004EXPORT_SYMBOL_GPL(blk_fill_rwbs);
2005
2006#endif /* CONFIG_EVENT_TRACING */
2007