   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
   4 *
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/blkdev.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/percpu.h>
  13#include <linux/init.h>
  14#include <linux/mutex.h>
  15#include <linux/slab.h>
  16#include <linux/debugfs.h>
  17#include <linux/export.h>
  18#include <linux/time.h>
  19#include <linux/uaccess.h>
  20#include <linux/list.h>
  21#include <linux/blk-cgroup.h>
  22
  23#include "../../block/blk.h"
  24
  25#include <trace/events/block.h>
  26
  27#include "trace_output.h"
  28
  29#ifdef CONFIG_BLK_DEV_IO_TRACE
  30
  31static unsigned int blktrace_seq __read_mostly = 1;
  32
  33static struct trace_array *blk_tr;
  34static bool blk_tracer_enabled __read_mostly;
  35
  36static LIST_HEAD(running_trace_list);
  37static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
  38
  39/* Select an alternative, minimalistic output instead of the original one */
  40#define TRACE_BLK_OPT_CLASSIC	0x1
  41#define TRACE_BLK_OPT_CGROUP	0x2
  42#define TRACE_BLK_OPT_CGNAME	0x4
  43
  44static struct tracer_opt blk_tracer_opts[] = {
  45	/* The minimalistic output is disabled by default */
  46	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
  47#ifdef CONFIG_BLK_CGROUP
  48	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
  49	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
  50#endif
  51	{ }
  52};
  53
  54static struct tracer_flags blk_tracer_flags = {
  55	.val  = 0,
  56	.opts = blk_tracer_opts,
  57};
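
/*
 * These flags surface as tracer options under tracefs once the "blk"
 * tracer is selected; e.g. (a sketch, assuming the usual tracefs mount
 * point):
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo 1   > /sys/kernel/debug/tracing/options/blk_classic
 */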
  58
  59/* Global reference count of probes */
  60static DEFINE_MUTEX(blk_probe_mutex);
  61static int blk_probes_ref;
  62
  63static void blk_register_tracepoints(void);
  64static void blk_unregister_tracepoints(void);
  65
  66/*
  67 * Send out a notify message.
  68 */
  69static void trace_note(struct blk_trace *bt, pid_t pid, int action,
  70		       const void *data, size_t len, u64 cgid)
  71{
  72	struct blk_io_trace *t;
  73	struct ring_buffer_event *event = NULL;
  74	struct trace_buffer *buffer = NULL;
  75	unsigned int trace_ctx = 0;
  76	int cpu = smp_processor_id();
  77	bool blk_tracer = blk_tracer_enabled;
  78	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
  79
  80	if (blk_tracer) {
  81		buffer = blk_tr->array_buffer.buffer;
  82		trace_ctx = tracing_gen_ctx_flags(0);
  83		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
  84						  sizeof(*t) + len + cgid_len,
  85						  trace_ctx);
  86		if (!event)
  87			return;
  88		t = ring_buffer_event_data(event);
  89		goto record_it;
  90	}
  91
  92	if (!bt->rchan)
  93		return;
  94
  95	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
  96	if (t) {
  97		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
  98		t->time = ktime_to_ns(ktime_get());
  99record_it:
 100		t->device = bt->dev;
 101		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
 102		t->pid = pid;
 103		t->cpu = cpu;
 104		t->pdu_len = len + cgid_len;
 105		if (cgid_len)
 106			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 107		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 108
 109		if (blk_tracer)
 110			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 111	}
 112}
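
/*
 * Both paths above emit the same record layout, which __blk_add_trace()
 * below also uses: a fixed struct blk_io_trace header, then an optional
 * __u64 cgid (present iff __BLK_TN_CGROUP/__BLK_TA_CGROUP is set in
 * ->action), then the payload itself; ->pdu_len counts everything that
 * follows the header.
 */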
 113
 114/*
 115 * Send out a notify for this process, if we haven't done so since a trace
 116 * started
 117 */
 118static void trace_note_tsk(struct task_struct *tsk)
 119{
 120	unsigned long flags;
 121	struct blk_trace *bt;
 122
 123	tsk->btrace_seq = blktrace_seq;
 124	raw_spin_lock_irqsave(&running_trace_lock, flags);
 125	list_for_each_entry(bt, &running_trace_list, running_list) {
 126		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 127			   sizeof(tsk->comm), 0);
 128	}
 129	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 130}
 131
 132static void trace_note_time(struct blk_trace *bt)
 133{
 134	struct timespec64 now;
 135	unsigned long flags;
 136	u32 words[2];
 137
 138	/* need to check user space to see if this breaks in y2038 or y2106 */
 139	ktime_get_real_ts64(&now);
 140	words[0] = (u32)now.tv_sec;
 141	words[1] = now.tv_nsec;
 142
 143	local_irq_save(flags);
 144	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
 145	local_irq_restore(flags);
 146}
 147
 148void __blk_trace_note_message(struct blk_trace *bt,
 149		struct cgroup_subsys_state *css, const char *fmt, ...)
 150{
 151	int n;
 152	va_list args;
 153	unsigned long flags;
 154	char *buf;
 155	u64 cgid = 0;
 156
 157	if (unlikely(bt->trace_state != Blktrace_running &&
 158		     !blk_tracer_enabled))
 159		return;
 160
 161	/*
 162	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
 163	 * message to the trace.
 164	 */
 165	if (!(bt->act_mask & BLK_TC_NOTIFY))
 166		return;
 167
 168	local_irq_save(flags);
 169	buf = this_cpu_ptr(bt->msg_data);
 170	va_start(args, fmt);
 171	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 172	va_end(args);
 173
 174#ifdef CONFIG_BLK_CGROUP
 175	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 176		cgid = cgroup_id(css->cgroup);
 177	else
 178		cgid = 1;
 179#endif
 180	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
 181	local_irq_restore(flags);
 182}
 183EXPORT_SYMBOL_GPL(__blk_trace_note_message);
 184
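/*
 * Per-event filter: returns nonzero when an event should be dropped, i.e.
 * when its class bits miss ->act_mask, its sector falls outside
 * [->start_lba, ->end_lba], or a pid filter is set and does not match.
 */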
 185static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 186			 pid_t pid)
 187{
 188	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
 189		return 1;
 190	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
 191		return 1;
 192	if (bt->pid && pid != bt->pid)
 193		return 1;
 194
 195	return 0;
 196}
 197
 198/*
 199 * Data direction bit lookup
 200 */
 201static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 202				 BLK_TC_ACT(BLK_TC_WRITE) };
 203
 204#define BLK_TC_RAHEAD		BLK_TC_AHEAD
 205#define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 206
 207/* The ilog2() calls fall out because they're constant */
 208#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) <<	\
 209	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
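
/*
 * MASK_TC_BIT() relocates one request flag bit into the classification
 * half of ->action: bit __REQ_<name> of 'rw' is shifted to bit
 * ilog2(BLK_TC_<name>) + BLK_TC_SHIFT. For each flag it is equivalent to
 * the branchy form (a sketch):
 *
 *	if (opf & REQ_SYNC)
 *		what |= BLK_TC_ACT(BLK_TC_SYNC);
 */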
 210
 211/*
 212 * The worker for the various blk_add_trace*() types. Fills out a
 213 * blk_io_trace structure and places it in a per-cpu subbuffer.
 214 */
 215static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 216			    const blk_opf_t opf, u32 what, int error,
 217			    int pdu_len, void *pdu_data, u64 cgid)
 218{
 219	struct task_struct *tsk = current;
 220	struct ring_buffer_event *event = NULL;
 221	struct trace_buffer *buffer = NULL;
 222	struct blk_io_trace *t;
 223	unsigned long flags = 0;
 224	unsigned long *sequence;
 225	unsigned int trace_ctx = 0;
 226	pid_t pid;
 227	int cpu;
 228	bool blk_tracer = blk_tracer_enabled;
 229	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 230	const enum req_op op = opf & REQ_OP_MASK;
 231
 232	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 233		return;
 234
 235	what |= ddir_act[op_is_write(op) ? WRITE : READ];
 236	what |= MASK_TC_BIT(opf, SYNC);
 237	what |= MASK_TC_BIT(opf, RAHEAD);
 238	what |= MASK_TC_BIT(opf, META);
 239	what |= MASK_TC_BIT(opf, PREFLUSH);
 240	what |= MASK_TC_BIT(opf, FUA);
 241	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 242		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 243	if (op == REQ_OP_FLUSH)
 244		what |= BLK_TC_ACT(BLK_TC_FLUSH);
 245	if (cgid)
 246		what |= __BLK_TA_CGROUP;
 247
 248	pid = tsk->pid;
 249	if (act_log_check(bt, what, sector, pid))
 250		return;
 251	cpu = raw_smp_processor_id();
 252
 253	if (blk_tracer) {
 254		tracing_record_cmdline(current);
 255
 256		buffer = blk_tr->array_buffer.buffer;
 257		trace_ctx = tracing_gen_ctx_flags(0);
 258		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 259						  sizeof(*t) + pdu_len + cgid_len,
 260						  trace_ctx);
 261		if (!event)
 262			return;
 263		t = ring_buffer_event_data(event);
 264		goto record_it;
 265	}
 266
 267	if (unlikely(tsk->btrace_seq != blktrace_seq))
 268		trace_note_tsk(tsk);
 269
 270	/*
 271	 * A word about the locking here - we disable interrupts to reserve
 272	 * some space in the relay per-cpu buffer, to prevent an irq
 273	 * from coming in and stepping on our toes.
 274	 */
 275	local_irq_save(flags);
 276	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
 277	if (t) {
 278		sequence = per_cpu_ptr(bt->sequence, cpu);
 279
 280		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
 281		t->sequence = ++(*sequence);
 282		t->time = ktime_to_ns(ktime_get());
 283record_it:
 284		/*
 285		 * These two are not needed in ftrace as they are in the
 286		 * generic trace_entry, filled by tracing_generic_entry_update,
 287		 * but for the trace_event->bin() synthesizer benefit we do it
 288		 * here too.
 289		 */
 290		t->cpu = cpu;
 291		t->pid = pid;
 292
 293		t->sector = sector;
 294		t->bytes = bytes;
 295		t->action = what;
 296		t->device = bt->dev;
 297		t->error = error;
 298		t->pdu_len = pdu_len + cgid_len;
 299
 300		if (cgid_len)
 301			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 302		if (pdu_len)
 303			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 304
 305		if (blk_tracer) {
 306			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 307			return;
 308		}
 309	}
 310
 311	local_irq_restore(flags);
 312}
 313
 314static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
 315{
 316	relay_close(bt->rchan);
 317
 318	/*
 319	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
 320	 * under 'q->debugfs_dir', thus lookup and remove them.
 321	 */
 322	if (!bt->dir) {
 323		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
 324		debugfs_lookup_and_remove("msg", q->debugfs_dir);
 325	} else {
 326		debugfs_remove(bt->dir);
 327	}
 328	free_percpu(bt->sequence);
 329	free_percpu(bt->msg_data);
 330	kfree(bt);
 331}
 332
 333static void get_probe_ref(void)
 334{
 335	mutex_lock(&blk_probe_mutex);
 336	if (++blk_probes_ref == 1)
 337		blk_register_tracepoints();
 338	mutex_unlock(&blk_probe_mutex);
 339}
 340
 341static void put_probe_ref(void)
 342{
 343	mutex_lock(&blk_probe_mutex);
 344	if (!--blk_probes_ref)
 345		blk_unregister_tracepoints();
 346	mutex_unlock(&blk_probe_mutex);
 347}
 348
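/*
 * Bumping blktrace_seq below invalidates every task's cached ->btrace_seq,
 * so the next event each task generates re-emits a BLK_TN_PROCESS notify
 * (see trace_note_tsk() and the check in __blk_add_trace()).
 */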
 349static int blk_trace_start(struct blk_trace *bt)
 350{
 351	if (bt->trace_state != Blktrace_setup &&
 352	    bt->trace_state != Blktrace_stopped)
 353		return -EINVAL;
 354
 355	blktrace_seq++;
 356	smp_mb();
 357	bt->trace_state = Blktrace_running;
 358	raw_spin_lock_irq(&running_trace_lock);
 359	list_add(&bt->running_list, &running_trace_list);
 360	raw_spin_unlock_irq(&running_trace_lock);
 361	trace_note_time(bt);
 362
 363	return 0;
 364}
 365
 366static int blk_trace_stop(struct blk_trace *bt)
 367{
 368	if (bt->trace_state != Blktrace_running)
 369		return -EINVAL;
 370
 371	bt->trace_state = Blktrace_stopped;
 372	raw_spin_lock_irq(&running_trace_lock);
 373	list_del_init(&bt->running_list);
 374	raw_spin_unlock_irq(&running_trace_lock);
 375	relay_flush(bt->rchan);
 376
 377	return 0;
 378}
 379
 380static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
 381{
 382	blk_trace_stop(bt);
 383	synchronize_rcu();
 384	blk_trace_free(q, bt);
 385	put_probe_ref();
 386}
 387
 388static int __blk_trace_remove(struct request_queue *q)
 389{
 390	struct blk_trace *bt;
 391
 392	bt = rcu_replace_pointer(q->blk_trace, NULL,
 393				 lockdep_is_held(&q->debugfs_mutex));
 394	if (!bt)
 395		return -EINVAL;
 396
 397	blk_trace_cleanup(q, bt);
 398
 399	return 0;
 400}
 401
 402int blk_trace_remove(struct request_queue *q)
 403{
 404	int ret;
 405
 406	mutex_lock(&q->debugfs_mutex);
 407	ret = __blk_trace_remove(q);
 408	mutex_unlock(&q->debugfs_mutex);
 409
 410	return ret;
 411}
 412EXPORT_SYMBOL_GPL(blk_trace_remove);
 413
 414static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 415				size_t count, loff_t *ppos)
 416{
 417	struct blk_trace *bt = filp->private_data;
 418	char buf[16];
 419
 420	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
 421
 422	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 423}
 424
 425static const struct file_operations blk_dropped_fops = {
 426	.owner =	THIS_MODULE,
 427	.open =		simple_open,
 428	.read =		blk_dropped_read,
 429	.llseek =	default_llseek,
 430};
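
/*
 * "dropped" is read-only; it reports how many events were lost to full
 * relay sub-buffers. For a whole-disk trace the file typically lives at:
 *
 *	$ cat /sys/kernel/debug/block/sda/dropped
 *	0
 */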
 431
 432static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 433				size_t count, loff_t *ppos)
 434{
 435	char *msg;
 436	struct blk_trace *bt;
 437
 438	if (count >= BLK_TN_MAX_MSG)
 439		return -EINVAL;
 440
 441	msg = memdup_user_nul(buffer, count);
 442	if (IS_ERR(msg))
 443		return PTR_ERR(msg);
 444
 445	bt = filp->private_data;
 446	__blk_trace_note_message(bt, NULL, "%s", msg);
 447	kfree(msg);
 448
 449	return count;
 450}
 451
 452static const struct file_operations blk_msg_fops = {
 453	.owner =	THIS_MODULE,
 454	.open =		simple_open,
 455	.write =	blk_msg_write,
 456	.llseek =	noop_llseek,
 457};
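
/*
 * "msg" is write-only: a string shorter than BLK_TN_MAX_MSG written here
 * is injected into the trace stream as a BLK_TN_MESSAGE note, e.g.:
 *
 *	$ echo "benchmark phase 2" > /sys/kernel/debug/block/sda/msg
 */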
 458
 459/*
 460 * Keep track of how many times we encountered a full subbuffer, to aid
 461 * the user space app in telling how many lost events there were.
 462 */
 463static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 464				     void *prev_subbuf, size_t prev_padding)
 465{
 466	struct blk_trace *bt;
 467
 468	if (!relay_buf_full(buf))
 469		return 1;
 470
 471	bt = buf->chan->private_data;
 472	atomic_inc(&bt->dropped);
 473	return 0;
 474}
 475
 476static int blk_remove_buf_file_callback(struct dentry *dentry)
 477{
 478	debugfs_remove(dentry);
 479
 480	return 0;
 481}
 482
 483static struct dentry *blk_create_buf_file_callback(const char *filename,
 484						   struct dentry *parent,
 485						   umode_t mode,
 486						   struct rchan_buf *buf,
 487						   int *is_global)
 488{
 489	return debugfs_create_file(filename, mode, parent, buf,
 490					&relay_file_operations);
 491}
 492
 493static const struct rchan_callbacks blk_relay_callbacks = {
 494	.subbuf_start		= blk_subbuf_start_callback,
 495	.create_buf_file	= blk_create_buf_file_callback,
 496	.remove_buf_file	= blk_remove_buf_file_callback,
 497};
 498
 499static void blk_trace_setup_lba(struct blk_trace *bt,
 500				struct block_device *bdev)
 501{
 502	if (bdev) {
 503		bt->start_lba = bdev->bd_start_sect;
 504		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
 505	} else {
 506		bt->start_lba = 0;
 507		bt->end_lba = -1ULL;
 508	}
 509}
 510
 511/*
 512 * Setup everything required to start tracing
 513 */
 514static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 515			      struct block_device *bdev,
 516			      struct blk_user_trace_setup *buts)
 517{
 518	struct blk_trace *bt = NULL;
 519	struct dentry *dir = NULL;
 520	int ret;
 521
 522	lockdep_assert_held(&q->debugfs_mutex);
 523
 524	if (!buts->buf_size || !buts->buf_nr)
 525		return -EINVAL;
 526
 527	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
 528	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
 529
 530	/*
 531	 * some device names have larger paths - convert the slashes
 532	 * to underscores for this to work as expected
 533	 */
 534	strreplace(buts->name, '/', '_');
 535
 536	/*
  537	 * bdev can be NULL, as with scsi-generic; this is as helpful as
  538	 * we can be.
 539	 */
 540	if (rcu_dereference_protected(q->blk_trace,
 541				      lockdep_is_held(&q->debugfs_mutex))) {
 542		pr_warn("Concurrent blktraces are not allowed on %s\n",
 543			buts->name);
 544		return -EBUSY;
 545	}
 546
 547	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
 548	if (!bt)
 549		return -ENOMEM;
 550
 551	ret = -ENOMEM;
 552	bt->sequence = alloc_percpu(unsigned long);
 553	if (!bt->sequence)
 554		goto err;
 555
 556	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 557	if (!bt->msg_data)
 558		goto err;
 559
 560	/*
  561	 * When tracing the whole disk, reuse the existing debugfs directory
  562	 * created by the block layer on init. For partition block devices
  563	 * and scsi-generic block devices we create a temporary new debugfs
  564	 * directory that will be removed once the trace ends.
 565	 */
 566	if (bdev && !bdev_is_partition(bdev))
 567		dir = q->debugfs_dir;
 568	else
 569		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
 570
 571	/*
 572	 * As blktrace relies on debugfs for its interface the debugfs directory
 573	 * is required, contrary to the usual mantra of not checking for debugfs
 574	 * files or directories.
 575	 */
 576	if (IS_ERR_OR_NULL(dir)) {
 577		pr_warn("debugfs_dir not present for %s so skipping\n",
 578			buts->name);
 579		ret = -ENOENT;
 580		goto err;
 581	}
 582
 583	bt->dev = dev;
 584	atomic_set(&bt->dropped, 0);
 585	INIT_LIST_HEAD(&bt->running_list);
 586
 587	ret = -EIO;
 588	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
 589	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 590
 591	bt->rchan = relay_open("trace", dir, buts->buf_size,
 592				buts->buf_nr, &blk_relay_callbacks, bt);
 593	if (!bt->rchan)
 594		goto err;
 595
 596	bt->act_mask = buts->act_mask;
 597	if (!bt->act_mask)
 598		bt->act_mask = (u16) -1;
 599
 600	blk_trace_setup_lba(bt, bdev);
 601
 602	/* overwrite with user settings */
 603	if (buts->start_lba)
 604		bt->start_lba = buts->start_lba;
 605	if (buts->end_lba)
 606		bt->end_lba = buts->end_lba;
 607
 608	bt->pid = buts->pid;
 609	bt->trace_state = Blktrace_setup;
 610
 611	rcu_assign_pointer(q->blk_trace, bt);
 612	get_probe_ref();
 613
 614	ret = 0;
 615err:
 616	if (ret)
 617		blk_trace_free(q, bt);
 618	return ret;
 619}
 620
 621static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 622			     struct block_device *bdev, char __user *arg)
 623{
 624	struct blk_user_trace_setup buts;
 625	int ret;
 626
 627	ret = copy_from_user(&buts, arg, sizeof(buts));
 628	if (ret)
 629		return -EFAULT;
 630
 631	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 632	if (ret)
 633		return ret;
 634
 635	if (copy_to_user(arg, &buts, sizeof(buts))) {
 636		__blk_trace_remove(q);
 637		return -EFAULT;
 638	}
 639	return 0;
 640}
 641
 642int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 643		    struct block_device *bdev,
 644		    char __user *arg)
 645{
 646	int ret;
 647
 648	mutex_lock(&q->debugfs_mutex);
 649	ret = __blk_trace_setup(q, name, dev, bdev, arg);
 650	mutex_unlock(&q->debugfs_mutex);
 651
 652	return ret;
 653}
 654EXPORT_SYMBOL_GPL(blk_trace_setup);
 655
 656#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 657static int compat_blk_trace_setup(struct request_queue *q, char *name,
 658				  dev_t dev, struct block_device *bdev,
 659				  char __user *arg)
 660{
 661	struct blk_user_trace_setup buts;
 662	struct compat_blk_user_trace_setup cbuts;
 663	int ret;
 664
 665	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 666		return -EFAULT;
 667
 668	buts = (struct blk_user_trace_setup) {
 669		.act_mask = cbuts.act_mask,
 670		.buf_size = cbuts.buf_size,
 671		.buf_nr = cbuts.buf_nr,
 672		.start_lba = cbuts.start_lba,
 673		.end_lba = cbuts.end_lba,
 674		.pid = cbuts.pid,
 675	};
 676
 677	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 678	if (ret)
 679		return ret;
 680
 681	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 682		__blk_trace_remove(q);
 683		return -EFAULT;
 684	}
 685
 686	return 0;
 687}
 688#endif
 689
 690static int __blk_trace_startstop(struct request_queue *q, int start)
 691{
 692	struct blk_trace *bt;
 693
 694	bt = rcu_dereference_protected(q->blk_trace,
 695				       lockdep_is_held(&q->debugfs_mutex));
 696	if (bt == NULL)
 697		return -EINVAL;
 698
 699	if (start)
 700		return blk_trace_start(bt);
 701	else
 702		return blk_trace_stop(bt);
 703}
 704
 705int blk_trace_startstop(struct request_queue *q, int start)
 706{
 707	int ret;
 708
 709	mutex_lock(&q->debugfs_mutex);
 710	ret = __blk_trace_startstop(q, start);
 711	mutex_unlock(&q->debugfs_mutex);
 712
 713	return ret;
 714}
 715EXPORT_SYMBOL_GPL(blk_trace_startstop);
 716
 717/*
 718 * When reading or writing the blktrace sysfs files, the references to the
 719 * opened sysfs or device files should prevent the underlying block device
 720 * from being removed. So no further delete protection is really needed.
 721 */
 722
 723/**
 724 * blk_trace_ioctl - handle the ioctls associated with tracing
 725 * @bdev:	the block device
 726 * @cmd:	the ioctl cmd
 727 * @arg:	the argument data, if any
 728 *
 729 **/
 730int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 731{
 732	struct request_queue *q = bdev_get_queue(bdev);
 733	int ret, start = 0;
 734	char b[BDEVNAME_SIZE];
 735
 736	mutex_lock(&q->debugfs_mutex);
 737
 738	switch (cmd) {
 739	case BLKTRACESETUP:
 740		snprintf(b, sizeof(b), "%pg", bdev);
 741		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 742		break;
 743#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 744	case BLKTRACESETUP32:
 745		snprintf(b, sizeof(b), "%pg", bdev);
 746		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 747		break;
 748#endif
 749	case BLKTRACESTART:
 750		start = 1;
 751		fallthrough;
 752	case BLKTRACESTOP:
 753		ret = __blk_trace_startstop(q, start);
 754		break;
 755	case BLKTRACETEARDOWN:
 756		ret = __blk_trace_remove(q);
 757		break;
 758	default:
 759		ret = -ENOTTY;
 760		break;
 761	}
 762
 763	mutex_unlock(&q->debugfs_mutex);
 764	return ret;
 765}
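
/*
 * A minimal userspace sequence driving these ioctls (a sketch of what
 * blktrace(8) does, error handling omitted):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);  // creates the debugfs files
 *	ioctl(fd, BLKTRACESTART);         // events start flowing
 *	// ... read the per-cpu relay files trace0..traceN under
 *	// /sys/kernel/debug/block/sda/ ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);      // frees the blk_trace
 */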
 766
 767/**
 768 * blk_trace_shutdown - stop and cleanup trace structures
 769 * @q:    the request queue associated with the device
 770 *
 771 **/
 772void blk_trace_shutdown(struct request_queue *q)
 773{
 774	if (rcu_dereference_protected(q->blk_trace,
 775				      lockdep_is_held(&q->debugfs_mutex)))
 776		__blk_trace_remove(q);
 777}
 778
 779#ifdef CONFIG_BLK_CGROUP
 780static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 781{
 782	struct cgroup_subsys_state *blkcg_css;
 783	struct blk_trace *bt;
 784
 785	/* We don't use the 'bt' value here except as an optimization... */
 786	bt = rcu_dereference_protected(q->blk_trace, 1);
 787	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 788		return 0;
 789
 790	blkcg_css = bio_blkcg_css(bio);
 791	if (!blkcg_css)
 792		return 0;
 793	return cgroup_id(blkcg_css->cgroup);
 794}
 795#else
 796static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 797{
 798	return 0;
 799}
 800#endif
 801
 802static u64
 803blk_trace_request_get_cgid(struct request *rq)
 804{
 805	if (!rq->bio)
 806		return 0;
 807	/* Use the first bio */
 808	return blk_trace_bio_get_cgid(rq->q, rq->bio);
 809}
 810
 811/*
 812 * blktrace probes
 813 */
 814
 815/**
 816 * blk_add_trace_rq - Add a trace for a request oriented action
 817 * @rq:		the source request
 818 * @error:	return status to log
 819 * @nr_bytes:	number of completed bytes
 820 * @what:	the action
 821 * @cgid:	the cgroup info
 822 *
 823 * Description:
 824 *     Records an action against a request. Will log the bio offset + size.
 825 *
 826 **/
 827static void blk_add_trace_rq(struct request *rq, blk_status_t error,
 828			     unsigned int nr_bytes, u32 what, u64 cgid)
 829{
 830	struct blk_trace *bt;
 831
 832	rcu_read_lock();
 833	bt = rcu_dereference(rq->q->blk_trace);
 834	if (likely(!bt)) {
 835		rcu_read_unlock();
 836		return;
 837	}
 838
 839	if (blk_rq_is_passthrough(rq))
 840		what |= BLK_TC_ACT(BLK_TC_PC);
 841	else
 842		what |= BLK_TC_ACT(BLK_TC_FS);
 843
 844	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
 845			what, blk_status_to_errno(error), 0, NULL, cgid);
 846	rcu_read_unlock();
 847}
 848
 849static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
 850{
 851	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
 852			 blk_trace_request_get_cgid(rq));
 853}
 854
 855static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
 856{
 857	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
 858			 blk_trace_request_get_cgid(rq));
 859}
 860
 861static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
 862{
 863	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
 864			 blk_trace_request_get_cgid(rq));
 865}
 866
 867static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
 868{
 869	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
 870			 blk_trace_request_get_cgid(rq));
 871}
 872
 873static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 874			blk_status_t error, unsigned int nr_bytes)
 875{
 876	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
 877			 blk_trace_request_get_cgid(rq));
 878}
 879
 880/**
 881 * blk_add_trace_bio - Add a trace for a bio oriented action
 882 * @q:		queue the io is for
 883 * @bio:	the source bio
 884 * @what:	the action
 885 * @error:	error, if any
 886 *
 887 * Description:
 888 *     Records an action against a bio. Will log the bio offset + size.
 889 *
 890 **/
 891static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 892			      u32 what, int error)
 893{
 894	struct blk_trace *bt;
 895
 896	rcu_read_lock();
 897	bt = rcu_dereference(q->blk_trace);
 898	if (likely(!bt)) {
 899		rcu_read_unlock();
 900		return;
 901	}
 902
 903	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 904			bio->bi_opf, what, error, 0, NULL,
 905			blk_trace_bio_get_cgid(q, bio));
 906	rcu_read_unlock();
 907}
 908
 909static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
 910{
 911	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
 912}
 913
 914static void blk_add_trace_bio_complete(void *ignore,
 915				       struct request_queue *q, struct bio *bio)
 916{
 917	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
 918			  blk_status_to_errno(bio->bi_status));
 919}
 920
 921static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
 922{
 923	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
 924			0);
 925}
 926
 927static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
 928{
 929	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
 930			0);
 931}
 932
 933static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
 934{
 935	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
 936}
 937
 938static void blk_add_trace_getrq(void *ignore, struct bio *bio)
 939{
 940	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
 941}
 942
 943static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 944{
 945	struct blk_trace *bt;
 946
 947	rcu_read_lock();
 948	bt = rcu_dereference(q->blk_trace);
 949	if (bt)
 950		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 951	rcu_read_unlock();
 952}
 953
 954static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 955				    unsigned int depth, bool explicit)
 956{
 957	struct blk_trace *bt;
 958
 959	rcu_read_lock();
 960	bt = rcu_dereference(q->blk_trace);
 961	if (bt) {
 962		__be64 rpdu = cpu_to_be64(depth);
 963		u32 what;
 964
 965		if (explicit)
 966			what = BLK_TA_UNPLUG_IO;
 967		else
 968			what = BLK_TA_UNPLUG_TIMER;
 969
 970		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
 971	}
 972	rcu_read_unlock();
 973}
 974
 975static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
 976{
 977	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 978	struct blk_trace *bt;
 979
 980	rcu_read_lock();
 981	bt = rcu_dereference(q->blk_trace);
 982	if (bt) {
 983		__be64 rpdu = cpu_to_be64(pdu);
 984
 985		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 986				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
 987				blk_status_to_errno(bio->bi_status),
 988				sizeof(rpdu), &rpdu,
 989				blk_trace_bio_get_cgid(q, bio));
 990	}
 991	rcu_read_unlock();
 992}
 993
 994/**
 995 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 996 * @ignore:	trace callback data parameter (not used)
 997 * @bio:	the source bio
 998 * @dev:	source device
 999 * @from:	source sector
1000 *
1001 * Called after a bio is remapped to a different device and/or sector.
1002 **/
1003static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
1004				    sector_t from)
1005{
1006	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1007	struct blk_trace *bt;
1008	struct blk_io_trace_remap r;
1009
1010	rcu_read_lock();
1011	bt = rcu_dereference(q->blk_trace);
1012	if (likely(!bt)) {
1013		rcu_read_unlock();
1014		return;
1015	}
1016
1017	r.device_from = cpu_to_be32(dev);
1018	r.device_to   = cpu_to_be32(bio_dev(bio));
1019	r.sector_from = cpu_to_be64(from);
1020
1021	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1022			bio->bi_opf, BLK_TA_REMAP,
1023			blk_status_to_errno(bio->bi_status),
1024			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1025	rcu_read_unlock();
1026}
1027
1028/**
1029 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1030 * @ignore:	trace callback data parameter (not used)
1031 * @rq:		the source request
1032 * @dev:	target device
1033 * @from:	source sector
1034 *
1035 * Description:
1036 *     Device mapper remaps request to other devices.
1037 *     Add a trace for that action.
1038 *
1039 **/
1040static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
1041				   sector_t from)
1042{
1043	struct blk_trace *bt;
1044	struct blk_io_trace_remap r;
1045
1046	rcu_read_lock();
1047	bt = rcu_dereference(rq->q->blk_trace);
1048	if (likely(!bt)) {
1049		rcu_read_unlock();
1050		return;
1051	}
1052
1053	r.device_from = cpu_to_be32(dev);
1054	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
1055	r.sector_from = cpu_to_be64(from);
1056
1057	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1058			rq->cmd_flags, BLK_TA_REMAP, 0,
1059			sizeof(r), &r, blk_trace_request_get_cgid(rq));
1060	rcu_read_unlock();
1061}
1062
1063/**
1064 * blk_add_driver_data - Add binary message with driver-specific data
1065 * @rq:		io request
1066 * @data:	driver-specific data
1067 * @len:	length of driver-specific data
1068 *
1069 * Description:
1070 *     Some drivers might want to write driver-specific data per request.
1071 *
1072 **/
1073void blk_add_driver_data(struct request *rq, void *data, size_t len)
1074{
1075	struct blk_trace *bt;
1076
1077	rcu_read_lock();
1078	bt = rcu_dereference(rq->q->blk_trace);
1079	if (likely(!bt)) {
1080		rcu_read_unlock();
1081		return;
1082	}
1083
1084	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
1085				BLK_TA_DRV_DATA, 0, len, data,
1086				blk_trace_request_get_cgid(rq));
1087	rcu_read_unlock();
1088}
1089EXPORT_SYMBOL_GPL(blk_add_driver_data);
1090
1091static void blk_register_tracepoints(void)
1092{
1093	int ret;
1094
1095	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1096	WARN_ON(ret);
1097	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1098	WARN_ON(ret);
1099	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1100	WARN_ON(ret);
1101	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1102	WARN_ON(ret);
1103	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1104	WARN_ON(ret);
1105	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1106	WARN_ON(ret);
1107	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1108	WARN_ON(ret);
1109	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1110	WARN_ON(ret);
1111	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1112	WARN_ON(ret);
1113	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1114	WARN_ON(ret);
1115	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1116	WARN_ON(ret);
1117	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1118	WARN_ON(ret);
1119	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1120	WARN_ON(ret);
1121	ret = register_trace_block_split(blk_add_trace_split, NULL);
1122	WARN_ON(ret);
1123	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1124	WARN_ON(ret);
1125	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1126	WARN_ON(ret);
1127}
1128
1129static void blk_unregister_tracepoints(void)
1130{
1131	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1132	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1133	unregister_trace_block_split(blk_add_trace_split, NULL);
1134	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1135	unregister_trace_block_plug(blk_add_trace_plug, NULL);
1136	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1137	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1138	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1139	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1140	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1141	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1142	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1143	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1144	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1145	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1146	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1147
1148	tracepoint_synchronize_unregister();
1149}
1150
1151/*
1152 * struct blk_io_tracer formatting routines
1153 */
1154
1155static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1156{
1157	int i = 0;
1158	int tc = t->action >> BLK_TC_SHIFT;
1159
1160	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1161		rwbs[i++] = 'N';
1162		goto out;
1163	}
1164
1165	if (tc & BLK_TC_FLUSH)
1166		rwbs[i++] = 'F';
1167
1168	if (tc & BLK_TC_DISCARD)
1169		rwbs[i++] = 'D';
1170	else if (tc & BLK_TC_WRITE)
1171		rwbs[i++] = 'W';
1172	else if (t->bytes)
1173		rwbs[i++] = 'R';
1174	else
1175		rwbs[i++] = 'N';
1176
1177	if (tc & BLK_TC_FUA)
1178		rwbs[i++] = 'F';
1179	if (tc & BLK_TC_AHEAD)
1180		rwbs[i++] = 'A';
1181	if (tc & BLK_TC_SYNC)
1182		rwbs[i++] = 'S';
1183	if (tc & BLK_TC_META)
1184		rwbs[i++] = 'M';
1185out:
1186	rwbs[i] = '\0';
1187}
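
/*
 * Example rwbs strings built above: "R" (read), "WS" (sync write), "WFS"
 * (sync write with FUA), "RA" (readahead), "N" (notify message or an
 * action that moved no data).
 */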
1188
1189static inline
1190const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1191{
1192	return (const struct blk_io_trace *)ent;
1193}
1194
1195static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1196{
1197	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
1198}
1199
1200static inline u64 t_cgid(const struct trace_entry *ent)
1201{
1202	return *(u64 *)(te_blk_io_trace(ent) + 1);
1203}
1204
1205static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1206{
1207	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
1208}
1209
1210static inline u32 t_action(const struct trace_entry *ent)
1211{
1212	return te_blk_io_trace(ent)->action;
1213}
1214
1215static inline u32 t_bytes(const struct trace_entry *ent)
1216{
1217	return te_blk_io_trace(ent)->bytes;
1218}
1219
1220static inline u32 t_sec(const struct trace_entry *ent)
1221{
1222	return te_blk_io_trace(ent)->bytes >> 9;
1223}
1224
1225static inline unsigned long long t_sector(const struct trace_entry *ent)
1226{
1227	return te_blk_io_trace(ent)->sector;
1228}
1229
1230static inline __u16 t_error(const struct trace_entry *ent)
1231{
1232	return te_blk_io_trace(ent)->error;
1233}
1234
1235static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1236{
1237	const __be64 *val = pdu_start(ent, has_cg);
1238	return be64_to_cpu(*val);
1239}
1240
1241typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1242	bool has_cg);
1243
1244static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1245	bool has_cg)
1246{
1247	char rwbs[RWBS_LEN];
1248	unsigned long long ts  = iter->ts;
1249	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1250	unsigned secs	       = (unsigned long)ts;
1251	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1252
1253	fill_rwbs(rwbs, t);
1254
1255	trace_seq_printf(&iter->seq,
1256			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1257			 MAJOR(t->device), MINOR(t->device), iter->cpu,
1258			 secs, nsec_rem, iter->ent->pid, act, rwbs);
1259}
1260
1261static void blk_log_action(struct trace_iterator *iter, const char *act,
1262	bool has_cg)
1263{
1264	char rwbs[RWBS_LEN];
1265	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1266
1267	fill_rwbs(rwbs, t);
1268	if (has_cg) {
1269		u64 id = t_cgid(iter->ent);
1270
1271		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1272			char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1273
1274			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1275				sizeof(blkcg_name_buf));
1276			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1277				 MAJOR(t->device), MINOR(t->device),
1278				 blkcg_name_buf, act, rwbs);
1279		} else {
1280			/*
1281			 * The cgid portion used to be "INO,GEN".  Userland
1282			 * builds a FILEID_INO32_GEN fid out of them and
1283			 * opens the cgroup using open_by_handle_at(2).
1284			 * While 32bit ino setups are still the same, 64bit
1285			 * ones now use the 64bit ino as the whole ID and
1286			 * no longer use generation.
1287			 *
1288			 * Regardless of the content, always output
1289			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
1290			 * be mapped back to @id on both 64 and 32bit ino
1291			 * setups.  See __kernfs_fh_to_dentry().
1292			 */
1293			trace_seq_printf(&iter->seq,
1294				 "%3d,%-3d %llx,%-llx %2s %3s ",
1295				 MAJOR(t->device), MINOR(t->device),
1296				 id & U32_MAX, id >> 32, act, rwbs);
1297		}
1298	} else
1299		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1300				 MAJOR(t->device), MINOR(t->device), act, rwbs);
1301}
1302
1303static void blk_log_dump_pdu(struct trace_seq *s,
1304	const struct trace_entry *ent, bool has_cg)
1305{
1306	const unsigned char *pdu_buf;
1307	int pdu_len;
1308	int i, end;
1309
1310	pdu_buf = pdu_start(ent, has_cg);
1311	pdu_len = pdu_real_len(ent, has_cg);
1312
1313	if (!pdu_len)
1314		return;
1315
 1316	/* find the last non-zero byte; one trailing zero is still printed */
1317	for (end = pdu_len - 1; end >= 0; end--)
1318		if (pdu_buf[end])
1319			break;
1320	end++;
1321
1322	trace_seq_putc(s, '(');
1323
1324	for (i = 0; i < pdu_len; i++) {
1325
1326		trace_seq_printf(s, "%s%02x",
1327				 i == 0 ? "" : " ", pdu_buf[i]);
1328
1329		/*
1330		 * stop when the rest is just zeros and indicate so
1331		 * with a ".." appended
1332		 */
1333		if (i == end && end != pdu_len - 1) {
1334			trace_seq_puts(s, " ..) ");
1335			return;
1336		}
1337	}
1338
1339	trace_seq_puts(s, ") ");
1340}
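
/*
 * e.g. a 6-byte pdu of 12 34 00 00 00 00 is rendered as "(12 34 00 ..) ",
 * while a pdu whose last byte is non-zero is printed in full.
 */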
1341
1342static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1343{
1344	char cmd[TASK_COMM_LEN];
1345
1346	trace_find_cmdline(ent->pid, cmd);
1347
1348	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1349		trace_seq_printf(s, "%u ", t_bytes(ent));
1350		blk_log_dump_pdu(s, ent, has_cg);
1351		trace_seq_printf(s, "[%s]\n", cmd);
1352	} else {
1353		if (t_sec(ent))
1354			trace_seq_printf(s, "%llu + %u [%s]\n",
1355						t_sector(ent), t_sec(ent), cmd);
1356		else
1357			trace_seq_printf(s, "[%s]\n", cmd);
1358	}
1359}
1360
1361static void blk_log_with_error(struct trace_seq *s,
1362			      const struct trace_entry *ent, bool has_cg)
1363{
1364	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1365		blk_log_dump_pdu(s, ent, has_cg);
1366		trace_seq_printf(s, "[%d]\n", t_error(ent));
1367	} else {
1368		if (t_sec(ent))
1369			trace_seq_printf(s, "%llu + %u [%d]\n",
1370					 t_sector(ent),
1371					 t_sec(ent), t_error(ent));
1372		else
1373			trace_seq_printf(s, "%llu [%d]\n",
1374					 t_sector(ent), t_error(ent));
1375	}
1376}
1377
1378static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1379{
1380	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1381
1382	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1383			 t_sector(ent), t_sec(ent),
1384			 MAJOR(be32_to_cpu(__r->device_from)),
1385			 MINOR(be32_to_cpu(__r->device_from)),
1386			 be64_to_cpu(__r->sector_from));
1387}
1388
1389static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1390{
1391	char cmd[TASK_COMM_LEN];
1392
1393	trace_find_cmdline(ent->pid, cmd);
1394
1395	trace_seq_printf(s, "[%s]\n", cmd);
1396}
1397
1398static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1399{
1400	char cmd[TASK_COMM_LEN];
1401
1402	trace_find_cmdline(ent->pid, cmd);
1403
1404	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1405}
1406
1407static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1408{
1409	char cmd[TASK_COMM_LEN];
1410
1411	trace_find_cmdline(ent->pid, cmd);
1412
1413	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1414			 get_pdu_int(ent, has_cg), cmd);
1415}
1416
1417static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1418			bool has_cg)
1419{
1420
1421	trace_seq_putmem(s, pdu_start(ent, has_cg),
1422		pdu_real_len(ent, has_cg));
1423	trace_seq_putc(s, '\n');
1424}
1425
1426/*
1427 * struct tracer operations
1428 */
1429
1430static void blk_tracer_print_header(struct seq_file *m)
1431{
1432	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1433		return;
1434	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1435		    "#  |     |     |           |   |   |\n");
1436}
1437
1438static void blk_tracer_start(struct trace_array *tr)
1439{
1440	blk_tracer_enabled = true;
1441}
1442
1443static int blk_tracer_init(struct trace_array *tr)
1444{
1445	blk_tr = tr;
1446	blk_tracer_start(tr);
1447	return 0;
1448}
1449
1450static void blk_tracer_stop(struct trace_array *tr)
1451{
1452	blk_tracer_enabled = false;
1453}
1454
1455static void blk_tracer_reset(struct trace_array *tr)
1456{
1457	blk_tracer_stop(tr);
1458}
1459
1460static const struct {
1461	const char *act[2];
1462	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
1463			    bool has_cg);
1464} what2act[] = {
1465	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
1466	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
1467	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
1468	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
1469	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
1470	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
1471	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
1472	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
1473	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
1474	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
1475	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
1476	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
1477	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
1478	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
1479	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
1480};
1481
1482static enum print_line_t print_one_line(struct trace_iterator *iter,
1483					bool classic)
1484{
1485	struct trace_array *tr = iter->tr;
1486	struct trace_seq *s = &iter->seq;
1487	const struct blk_io_trace *t;
1488	u16 what;
1489	bool long_act;
1490	blk_log_action_t *log_action;
1491	bool has_cg;
1492
1493	t	   = te_blk_io_trace(iter->ent);
1494	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1495	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1496	log_action = classic ? &blk_log_action_classic : &blk_log_action;
1497	has_cg	   = t->action & __BLK_TA_CGROUP;
1498
1499	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1500		log_action(iter, long_act ? "message" : "m", has_cg);
1501		blk_log_msg(s, iter->ent, has_cg);
1502		return trace_handle_return(s);
1503	}
1504
1505	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1506		trace_seq_printf(s, "Unknown action %x\n", what);
1507	else {
1508		log_action(iter, what2act[what].act[long_act], has_cg);
1509		what2act[what].print(s, iter->ent, has_cg);
1510	}
1511
1512	return trace_handle_return(s);
1513}
1514
1515static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1516					       int flags, struct trace_event *event)
1517{
1518	return print_one_line(iter, false);
1519}
1520
1521static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1522{
1523	struct trace_seq *s = &iter->seq;
1524	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1525	const int offset = offsetof(struct blk_io_trace, sector);
1526	struct blk_io_trace old = {
1527		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1528		.time     = iter->ts,
1529	};
1530
1531	trace_seq_putmem(s, &old, offset);
1532	trace_seq_putmem(s, &t->sector,
1533			 sizeof(old) - offset + t->pdu_len);
1534}
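
/*
 * The binary path reconstructs a classic struct blk_io_trace record
 * (fresh magic and timestamp, then everything from ->sector onwards) so
 * the ftrace buffer can be fed to blkparse(1)-style consumers.
 */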
1535
1536static enum print_line_t
1537blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1538			     struct trace_event *event)
1539{
1540	blk_trace_synthesize_old_trace(iter);
1541
1542	return trace_handle_return(&iter->seq);
1543}
1544
1545static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1546{
1547	if ((iter->ent->type != TRACE_BLK) ||
1548	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1549		return TRACE_TYPE_UNHANDLED;
1550
1551	return print_one_line(iter, true);
1552}
1553
1554static int
1555blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1556{
1557	/* don't output context-info for blk_classic output */
1558	if (bit == TRACE_BLK_OPT_CLASSIC) {
1559		if (set)
1560			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1561		else
1562			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1563	}
1564	return 0;
1565}
1566
1567static struct tracer blk_tracer __read_mostly = {
1568	.name		= "blk",
1569	.init		= blk_tracer_init,
1570	.reset		= blk_tracer_reset,
1571	.start		= blk_tracer_start,
1572	.stop		= blk_tracer_stop,
1573	.print_header	= blk_tracer_print_header,
1574	.print_line	= blk_tracer_print_line,
1575	.flags		= &blk_tracer_flags,
1576	.set_flag	= blk_tracer_set_flag,
1577};
1578
1579static struct trace_event_functions trace_blk_event_funcs = {
1580	.trace		= blk_trace_event_print,
1581	.binary		= blk_trace_event_print_binary,
1582};
1583
1584static struct trace_event trace_blk_event = {
1585	.type		= TRACE_BLK,
1586	.funcs		= &trace_blk_event_funcs,
1587};
1588
1589static int __init init_blk_tracer(void)
1590{
1591	if (!register_trace_event(&trace_blk_event)) {
1592		pr_warn("Warning: could not register block events\n");
1593		return 1;
1594	}
1595
1596	if (register_tracer(&blk_tracer) != 0) {
1597		pr_warn("Warning: could not register the block tracer\n");
1598		unregister_trace_event(&trace_blk_event);
1599		return 1;
1600	}
1601
1602	return 0;
1603}
1604
1605device_initcall(init_blk_tracer);
1606
1607static int blk_trace_remove_queue(struct request_queue *q)
1608{
1609	struct blk_trace *bt;
1610
1611	bt = rcu_replace_pointer(q->blk_trace, NULL,
1612				 lockdep_is_held(&q->debugfs_mutex));
1613	if (bt == NULL)
1614		return -EINVAL;
1615
1616	blk_trace_stop(bt);
1617
1618	put_probe_ref();
1619	synchronize_rcu();
1620	blk_trace_free(q, bt);
1621	return 0;
1622}
1623
1624/*
1625 * Setup everything required to start tracing
1626 */
1627static int blk_trace_setup_queue(struct request_queue *q,
1628				 struct block_device *bdev)
1629{
1630	struct blk_trace *bt = NULL;
1631	int ret = -ENOMEM;
1632
1633	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1634	if (!bt)
1635		return -ENOMEM;
1636
1637	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1638	if (!bt->msg_data)
1639		goto free_bt;
1640
1641	bt->dev = bdev->bd_dev;
1642	bt->act_mask = (u16)-1;
1643
1644	blk_trace_setup_lba(bt, bdev);
1645
1646	rcu_assign_pointer(q->blk_trace, bt);
1647	get_probe_ref();
1648	return 0;
1649
1650free_bt:
1651	blk_trace_free(q, bt);
1652	return ret;
1653}
1654
1655/*
1656 * sysfs interface to enable and configure tracing
1657 */
1658
1659static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1660					 struct device_attribute *attr,
1661					 char *buf);
1662static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1663					  struct device_attribute *attr,
1664					  const char *buf, size_t count);
1665#define BLK_TRACE_DEVICE_ATTR(_name) \
1666	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1667		    sysfs_blk_trace_attr_show, \
1668		    sysfs_blk_trace_attr_store)
1669
1670static BLK_TRACE_DEVICE_ATTR(enable);
1671static BLK_TRACE_DEVICE_ATTR(act_mask);
1672static BLK_TRACE_DEVICE_ATTR(pid);
1673static BLK_TRACE_DEVICE_ATTR(start_lba);
1674static BLK_TRACE_DEVICE_ATTR(end_lba);
1675
1676static struct attribute *blk_trace_attrs[] = {
1677	&dev_attr_enable.attr,
1678	&dev_attr_act_mask.attr,
1679	&dev_attr_pid.attr,
1680	&dev_attr_start_lba.attr,
1681	&dev_attr_end_lba.attr,
1682	NULL
1683};
1684
1685struct attribute_group blk_trace_attr_group = {
1686	.name  = "trace",
1687	.attrs = blk_trace_attrs,
1688};
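
/*
 * The "trace" group attaches under the block device's sysfs directory,
 * giving an ioctl-free way to configure tracing for the ftrace "blk"
 * tracer, e.g.:
 *
 *	echo 1 > /sys/block/sda/trace/enable
 *	cat /sys/block/sda/trace/act_mask
 */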
1689
1690static const struct {
1691	int mask;
1692	const char *str;
1693} mask_maps[] = {
1694	{ BLK_TC_READ,		"read"		},
1695	{ BLK_TC_WRITE,		"write"		},
1696	{ BLK_TC_FLUSH,		"flush"		},
1697	{ BLK_TC_SYNC,		"sync"		},
1698	{ BLK_TC_QUEUE,		"queue"		},
1699	{ BLK_TC_REQUEUE,	"requeue"	},
1700	{ BLK_TC_ISSUE,		"issue"		},
1701	{ BLK_TC_COMPLETE,	"complete"	},
1702	{ BLK_TC_FS,		"fs"		},
1703	{ BLK_TC_PC,		"pc"		},
1704	{ BLK_TC_NOTIFY,	"notify"	},
1705	{ BLK_TC_AHEAD,		"ahead"		},
1706	{ BLK_TC_META,		"meta"		},
1707	{ BLK_TC_DISCARD,	"discard"	},
1708	{ BLK_TC_DRV_DATA,	"drv_data"	},
1709	{ BLK_TC_FUA,		"fua"		},
1710};
1711
1712static int blk_trace_str2mask(const char *str)
1713{
1714	int i;
1715	int mask = 0;
1716	char *buf, *s, *token;
1717
1718	buf = kstrdup(str, GFP_KERNEL);
1719	if (buf == NULL)
1720		return -ENOMEM;
1721	s = strstrip(buf);
1722
1723	while (1) {
1724		token = strsep(&s, ",");
1725		if (token == NULL)
1726			break;
1727
1728		if (*token == '\0')
1729			continue;
1730
1731		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1732			if (strcasecmp(token, mask_maps[i].str) == 0) {
1733				mask |= mask_maps[i].mask;
1734				break;
1735			}
1736		}
1737		if (i == ARRAY_SIZE(mask_maps)) {
1738			mask = -EINVAL;
1739			break;
1740		}
1741	}
1742	kfree(buf);
1743
1744	return mask;
1745}
1746
1747static ssize_t blk_trace_mask2str(char *buf, int mask)
1748{
1749	int i;
1750	char *p = buf;
1751
1752	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1753		if (mask & mask_maps[i].mask) {
1754			p += sprintf(p, "%s%s",
1755				    (p == buf) ? "" : ",", mask_maps[i].str);
1756		}
1757	}
1758	*p++ = '\n';
1759
1760	return p - buf;
1761}
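
/*
 * Round trip example: writing "read,write,sync" to act_mask parses via
 * blk_trace_str2mask() to BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC, and
 * blk_trace_mask2str() renders that mask back as "read,write,sync".
 */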
1762
1763static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1764					 struct device_attribute *attr,
1765					 char *buf)
1766{
1767	struct block_device *bdev = dev_to_bdev(dev);
1768	struct request_queue *q = bdev_get_queue(bdev);
1769	struct blk_trace *bt;
1770	ssize_t ret = -ENXIO;
1771
1772	mutex_lock(&q->debugfs_mutex);
1773
1774	bt = rcu_dereference_protected(q->blk_trace,
1775				       lockdep_is_held(&q->debugfs_mutex));
1776	if (attr == &dev_attr_enable) {
1777		ret = sprintf(buf, "%u\n", !!bt);
1778		goto out_unlock_bdev;
1779	}
1780
1781	if (bt == NULL)
1782		ret = sprintf(buf, "disabled\n");
1783	else if (attr == &dev_attr_act_mask)
1784		ret = blk_trace_mask2str(buf, bt->act_mask);
1785	else if (attr == &dev_attr_pid)
1786		ret = sprintf(buf, "%u\n", bt->pid);
1787	else if (attr == &dev_attr_start_lba)
1788		ret = sprintf(buf, "%llu\n", bt->start_lba);
1789	else if (attr == &dev_attr_end_lba)
1790		ret = sprintf(buf, "%llu\n", bt->end_lba);
1791
1792out_unlock_bdev:
1793	mutex_unlock(&q->debugfs_mutex);
1794	return ret;
1795}
1796
1797static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1798					  struct device_attribute *attr,
1799					  const char *buf, size_t count)
1800{
1801	struct block_device *bdev = dev_to_bdev(dev);
1802	struct request_queue *q = bdev_get_queue(bdev);
1803	struct blk_trace *bt;
1804	u64 value;
1805	ssize_t ret = -EINVAL;
1806
1807	if (count == 0)
1808		goto out;
1809
1810	if (attr == &dev_attr_act_mask) {
1811		if (kstrtoull(buf, 0, &value)) {
1812			/* Assume it is a list of trace category names */
1813			ret = blk_trace_str2mask(buf);
1814			if (ret < 0)
1815				goto out;
1816			value = ret;
1817		}
1818	} else {
1819		if (kstrtoull(buf, 0, &value))
1820			goto out;
1821	}
1822
1823	mutex_lock(&q->debugfs_mutex);
1824
1825	bt = rcu_dereference_protected(q->blk_trace,
1826				       lockdep_is_held(&q->debugfs_mutex));
1827	if (attr == &dev_attr_enable) {
1828		if (!!value == !!bt) {
1829			ret = 0;
1830			goto out_unlock_bdev;
1831		}
1832		if (value)
1833			ret = blk_trace_setup_queue(q, bdev);
1834		else
1835			ret = blk_trace_remove_queue(q);
1836		goto out_unlock_bdev;
1837	}
1838
1839	ret = 0;
1840	if (bt == NULL) {
1841		ret = blk_trace_setup_queue(q, bdev);
1842		bt = rcu_dereference_protected(q->blk_trace,
1843				lockdep_is_held(&q->debugfs_mutex));
1844	}
1845
1846	if (ret == 0) {
1847		if (attr == &dev_attr_act_mask)
1848			bt->act_mask = value;
1849		else if (attr == &dev_attr_pid)
1850			bt->pid = value;
1851		else if (attr == &dev_attr_start_lba)
1852			bt->start_lba = value;
1853		else if (attr == &dev_attr_end_lba)
1854			bt->end_lba = value;
1855	}
1856
1857out_unlock_bdev:
1858	mutex_unlock(&q->debugfs_mutex);
1859out:
1860	return ret ? ret : count;
1861}
1862#endif /* CONFIG_BLK_DEV_IO_TRACE */
1863
1864#ifdef CONFIG_EVENT_TRACING
1865
1866/**
1867 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
1868 * @rwbs:	buffer to be filled
1869 * @opf:	request operation type (REQ_OP_XXX) and flags for the tracepoint
1870 *
1871 * Description:
1872 *     Maps each request operation and flag to a single character and fills the
1873 *     buffer provided by the caller with resulting string.
1874 *
1875 **/
1876void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
1877{
1878	int i = 0;
1879
1880	if (opf & REQ_PREFLUSH)
1881		rwbs[i++] = 'F';
1882
1883	switch (opf & REQ_OP_MASK) {
1884	case REQ_OP_WRITE:
1885		rwbs[i++] = 'W';
1886		break;
1887	case REQ_OP_DISCARD:
1888		rwbs[i++] = 'D';
1889		break;
1890	case REQ_OP_SECURE_ERASE:
1891		rwbs[i++] = 'D';
1892		rwbs[i++] = 'E';
1893		break;
1894	case REQ_OP_FLUSH:
1895		rwbs[i++] = 'F';
1896		break;
1897	case REQ_OP_READ:
1898		rwbs[i++] = 'R';
1899		break;
1900	default:
1901		rwbs[i++] = 'N';
1902	}
1903
1904	if (opf & REQ_FUA)
1905		rwbs[i++] = 'F';
1906	if (opf & REQ_RAHEAD)
1907		rwbs[i++] = 'A';
1908	if (opf & REQ_SYNC)
1909		rwbs[i++] = 'S';
1910	if (opf & REQ_META)
1911		rwbs[i++] = 'M';
1912
1913	rwbs[i] = '\0';
1914}
1915EXPORT_SYMBOL_GPL(blk_fill_rwbs);
1916
1917#endif /* CONFIG_EVENT_TRACING */
1918
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
   4 *
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/blkdev.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/percpu.h>
  13#include <linux/init.h>
  14#include <linux/mutex.h>
  15#include <linux/slab.h>
  16#include <linux/debugfs.h>
  17#include <linux/export.h>
  18#include <linux/time.h>
  19#include <linux/uaccess.h>
  20#include <linux/list.h>
  21#include <linux/blk-cgroup.h>
  22
  23#include "../../block/blk.h"
  24
  25#include <trace/events/block.h>
  26
  27#include "trace_output.h"
  28
  29#ifdef CONFIG_BLK_DEV_IO_TRACE
  30
  31static unsigned int blktrace_seq __read_mostly = 1;
  32
  33static struct trace_array *blk_tr;
  34static bool blk_tracer_enabled __read_mostly;
  35
  36static LIST_HEAD(running_trace_list);
  37static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
  38
  39/* Select an alternative, minimalistic output than the original one */
  40#define TRACE_BLK_OPT_CLASSIC	0x1
  41#define TRACE_BLK_OPT_CGROUP	0x2
  42#define TRACE_BLK_OPT_CGNAME	0x4
  43
  44static struct tracer_opt blk_tracer_opts[] = {
  45	/* Default disable the minimalistic output */
  46	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
  47#ifdef CONFIG_BLK_CGROUP
  48	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
  49	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
  50#endif
  51	{ }
  52};
  53
  54static struct tracer_flags blk_tracer_flags = {
  55	.val  = 0,
  56	.opts = blk_tracer_opts,
  57};
  58
  59/* Global reference count of probes */
  60static DEFINE_MUTEX(blk_probe_mutex);
  61static int blk_probes_ref;
  62
  63static void blk_register_tracepoints(void);
  64static void blk_unregister_tracepoints(void);
  65
  66/*
  67 * Send out a notify message.
  68 */
  69static void trace_note(struct blk_trace *bt, pid_t pid, int action,
  70		       const void *data, size_t len, u64 cgid)
  71{
  72	struct blk_io_trace *t;
  73	struct ring_buffer_event *event = NULL;
  74	struct trace_buffer *buffer = NULL;
  75	int pc = 0;
  76	int cpu = smp_processor_id();
  77	bool blk_tracer = blk_tracer_enabled;
  78	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
  79
  80	if (blk_tracer) {
  81		buffer = blk_tr->array_buffer.buffer;
  82		pc = preempt_count();
  83		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
  84						  sizeof(*t) + len + cgid_len,
  85						  0, pc);
  86		if (!event)
  87			return;
  88		t = ring_buffer_event_data(event);
  89		goto record_it;
  90	}
  91
  92	if (!bt->rchan)
  93		return;
  94
  95	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
  96	if (t) {
  97		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
  98		t->time = ktime_to_ns(ktime_get());
  99record_it:
 100		t->device = bt->dev;
 101		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
 102		t->pid = pid;
 103		t->cpu = cpu;
 104		t->pdu_len = len + cgid_len;
 105		if (cgid_len)
 106			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 107		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 108
 109		if (blk_tracer)
 110			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 111	}
 112}
 113
 114/*
 115 * Send out a notify for this process, if we haven't done so since a trace
 116 * started
 117 */
 118static void trace_note_tsk(struct task_struct *tsk)
 119{
 120	unsigned long flags;
 121	struct blk_trace *bt;
 122
 123	tsk->btrace_seq = blktrace_seq;
 124	spin_lock_irqsave(&running_trace_lock, flags);
 125	list_for_each_entry(bt, &running_trace_list, running_list) {
 126		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 127			   sizeof(tsk->comm), 0);
 128	}
 129	spin_unlock_irqrestore(&running_trace_lock, flags);
 130}
 131
 132static void trace_note_time(struct blk_trace *bt)
 133{
 134	struct timespec64 now;
 135	unsigned long flags;
 136	u32 words[2];
 137
 138	/* need to check user space to see if this breaks in y2038 or y2106 */
 139	ktime_get_real_ts64(&now);
 140	words[0] = (u32)now.tv_sec;
 141	words[1] = now.tv_nsec;
 142
 143	local_irq_save(flags);
 144	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
 145	local_irq_restore(flags);
 146}
 147
 148void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
 149	const char *fmt, ...)
 150{
 151	int n;
 152	va_list args;
 153	unsigned long flags;
 154	char *buf;
 155
 156	if (unlikely(bt->trace_state != Blktrace_running &&
 157		     !blk_tracer_enabled))
 158		return;
 159
 160	/*
 161	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
 162	 * message to the trace.
 163	 */
 164	if (!(bt->act_mask & BLK_TC_NOTIFY))
 165		return;
 166
 167	local_irq_save(flags);
 168	buf = this_cpu_ptr(bt->msg_data);
 169	va_start(args, fmt);
 170	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 171	va_end(args);
 172
 173	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 174		blkcg = NULL;
 175#ifdef CONFIG_BLK_CGROUP
 176	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
 177		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
 178#else
 179	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
 180#endif
 181	local_irq_restore(flags);
 182}
 183EXPORT_SYMBOL_GPL(__trace_note_message);
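/*
 * Exposition note (not part of the kernel source): drivers normally reach
 * __trace_note_message() through the blk_add_trace_msg() macro declared in
 * <linux/blktrace_api.h>, e.g.:
 *
 *	blk_add_trace_msg(q, "%s: queue stopped", name);
 *
 * where "name" is a placeholder. The message is rendered as an 'm' /
 * "message" note in the trace output.
 */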
 184
 185static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 186			 pid_t pid)
 187{
 188	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
 189		return 1;
 190	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
 191		return 1;
 192	if (bt->pid && pid != bt->pid)
 193		return 1;
 194
 195	return 0;
 196}
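/*
 * Exposition note (not part of the kernel source): a non-zero return from
 * act_log_check() means "drop this event" - its action class is masked
 * off, its sector falls outside the configured start/end LBA window, or
 * it was issued by a pid other than the one being traced.
 */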
 197
 198/*
 199 * Data direction bit lookup
 200 */
 201static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 202				 BLK_TC_ACT(BLK_TC_WRITE) };
 203
 204#define BLK_TC_RAHEAD		BLK_TC_AHEAD
 205#define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 206
 207/* The ilog2() calls fall out because they're constant */
 208#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
 209	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
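/*
 * Exposition note (not part of the kernel source): MASK_TC_BIT(rw, SYNC),
 * for instance, isolates the REQ_SYNC bit in @rw and shifts it from bit
 * position __REQ_SYNC up into the BLK_TC_SYNC slot of the action word's
 * category half (the bits above BLK_TC_SHIFT). Because ilog2() of a
 * constant is itself constant, the shift amount folds at compile time.
 */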
 210
 211/*
 212 * The worker for the various blk_add_trace*() types. Fills out a
 213 * blk_io_trace structure and places it in a per-cpu subbuffer.
 214 */
 215static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 216		     int op, int op_flags, u32 what, int error, int pdu_len,
 217		     void *pdu_data, u64 cgid)
 218{
 219	struct task_struct *tsk = current;
 220	struct ring_buffer_event *event = NULL;
 221	struct trace_buffer *buffer = NULL;
 222	struct blk_io_trace *t;
 223	unsigned long flags = 0;
 224	unsigned long *sequence;
 225	pid_t pid;
 226	int cpu, pc = 0;
 227	bool blk_tracer = blk_tracer_enabled;
 228	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 229
 230	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 231		return;
 232
 233	what |= ddir_act[op_is_write(op) ? WRITE : READ];
 234	what |= MASK_TC_BIT(op_flags, SYNC);
 235	what |= MASK_TC_BIT(op_flags, RAHEAD);
 236	what |= MASK_TC_BIT(op_flags, META);
 237	what |= MASK_TC_BIT(op_flags, PREFLUSH);
 238	what |= MASK_TC_BIT(op_flags, FUA);
 239	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 240		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 241	if (op == REQ_OP_FLUSH)
 242		what |= BLK_TC_ACT(BLK_TC_FLUSH);
 243	if (cgid)
 244		what |= __BLK_TA_CGROUP;
 245
 246	pid = tsk->pid;
 247	if (act_log_check(bt, what, sector, pid))
 248		return;
 249	cpu = raw_smp_processor_id();
 250
 251	if (blk_tracer) {
 252		tracing_record_cmdline(current);
 253
 254		buffer = blk_tr->array_buffer.buffer;
 255		pc = preempt_count();
 256		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 257						  sizeof(*t) + pdu_len + cgid_len,
 258						  0, pc);
 259		if (!event)
 260			return;
 261		t = ring_buffer_event_data(event);
 262		goto record_it;
 263	}
 264
 265	if (unlikely(tsk->btrace_seq != blktrace_seq))
 266		trace_note_tsk(tsk);
 267
 268	/*
 269	 * A word about the locking here - we disable interrupts to reserve
 270	 * some space in the relay per-cpu buffer, to prevent an irq
 271	 * from coming in and stepping on our toes.
 272	 */
 273	local_irq_save(flags);
 274	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
 275	if (t) {
 276		sequence = per_cpu_ptr(bt->sequence, cpu);
 277
 278		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
 279		t->sequence = ++(*sequence);
 280		t->time = ktime_to_ns(ktime_get());
 281record_it:
 282		/*
 283		 * These two are not needed in ftrace as they are in the
 284		 * generic trace_entry, filled by tracing_generic_entry_update,
 285		 * but for the trace_event->bin() synthesizer benefit we do it
 286		 * here too.
 287		 */
 288		t->cpu = cpu;
 289		t->pid = pid;
 290
 291		t->sector = sector;
 292		t->bytes = bytes;
 293		t->action = what;
 294		t->device = bt->dev;
 295		t->error = error;
 296		t->pdu_len = pdu_len + cgid_len;
 297
 298		if (cgid_len)
 299			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 300		if (pdu_len)
 301			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 302
 303		if (blk_tracer) {
 304			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 305			return;
 306		}
 307	}
 308
 309	local_irq_restore(flags);
 310}
 311
 312static void blk_trace_free(struct blk_trace *bt)
 313{
 314	debugfs_remove(bt->msg_file);
 315	debugfs_remove(bt->dropped_file);
 316	relay_close(bt->rchan);
 317	debugfs_remove(bt->dir);
 318	free_percpu(bt->sequence);
 319	free_percpu(bt->msg_data);
 320	kfree(bt);
 321}
 322
 323static void get_probe_ref(void)
 324{
 325	mutex_lock(&blk_probe_mutex);
 326	if (++blk_probes_ref == 1)
 327		blk_register_tracepoints();
 328	mutex_unlock(&blk_probe_mutex);
 329}
 330
 331static void put_probe_ref(void)
 332{
 333	mutex_lock(&blk_probe_mutex);
 334	if (!--blk_probes_ref)
 335		blk_unregister_tracepoints();
 336	mutex_unlock(&blk_probe_mutex);
 337}
 338
 339static void blk_trace_cleanup(struct blk_trace *bt)
 340{
 341	synchronize_rcu();
 342	blk_trace_free(bt);
 343	put_probe_ref();
 344}
 345
 346static int __blk_trace_remove(struct request_queue *q)
 347{
 348	struct blk_trace *bt;
 349
 350	bt = rcu_replace_pointer(q->blk_trace, NULL,
 351				 lockdep_is_held(&q->debugfs_mutex));
 352	if (!bt)
 353		return -EINVAL;
 354
 355	if (bt->trace_state != Blktrace_running)
 356		blk_trace_cleanup(bt);
 357
 358	return 0;
 359}
 360
 361int blk_trace_remove(struct request_queue *q)
 362{
 363	int ret;
 364
 365	mutex_lock(&q->debugfs_mutex);
 366	ret = __blk_trace_remove(q);
 367	mutex_unlock(&q->debugfs_mutex);
 368
 369	return ret;
 370}
 371EXPORT_SYMBOL_GPL(blk_trace_remove);
 372
 373static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 374				size_t count, loff_t *ppos)
 375{
 376	struct blk_trace *bt = filp->private_data;
 377	char buf[16];
 378
 379	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
 380
 381	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 382}
 383
 384static const struct file_operations blk_dropped_fops = {
 385	.owner =	THIS_MODULE,
 386	.open =		simple_open,
 387	.read =		blk_dropped_read,
 388	.llseek =	default_llseek,
 389};
 390
 391static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 392				size_t count, loff_t *ppos)
 393{
 394	char *msg;
 395	struct blk_trace *bt;
 396
 397	if (count >= BLK_TN_MAX_MSG)
 398		return -EINVAL;
 399
 400	msg = memdup_user_nul(buffer, count);
 401	if (IS_ERR(msg))
 402		return PTR_ERR(msg);
 403
 404	bt = filp->private_data;
 405	__trace_note_message(bt, NULL, "%s", msg);
 406	kfree(msg);
 407
 408	return count;
 409}
 410
 411static const struct file_operations blk_msg_fops = {
 412	.owner =	THIS_MODULE,
 413	.open =		simple_open,
 414	.write =	blk_msg_write,
 415	.llseek =	noop_llseek,
 416};
 417
 418/*
 419 * Keep track of how many times we encountered a full subbuffer, to aid
 420 * the user space app in telling how many lost events there were.
 421 */
 422static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 423				     void *prev_subbuf, size_t prev_padding)
 424{
 425	struct blk_trace *bt;
 426
 427	if (!relay_buf_full(buf))
 428		return 1;
 429
 430	bt = buf->chan->private_data;
 431	atomic_inc(&bt->dropped);
 432	return 0;
 433}
 434
 435static int blk_remove_buf_file_callback(struct dentry *dentry)
 436{
 437	debugfs_remove(dentry);
 438
 439	return 0;
 440}
 441
 442static struct dentry *blk_create_buf_file_callback(const char *filename,
 443						   struct dentry *parent,
 444						   umode_t mode,
 445						   struct rchan_buf *buf,
 446						   int *is_global)
 447{
 448	return debugfs_create_file(filename, mode, parent, buf,
 449					&relay_file_operations);
 450}
 451
 452static struct rchan_callbacks blk_relay_callbacks = {
 453	.subbuf_start		= blk_subbuf_start_callback,
 454	.create_buf_file	= blk_create_buf_file_callback,
 455	.remove_buf_file	= blk_remove_buf_file_callback,
 456};
 457
 458static void blk_trace_setup_lba(struct blk_trace *bt,
 459				struct block_device *bdev)
 460{
 461	struct hd_struct *part = NULL;
 462
 463	if (bdev)
 464		part = bdev->bd_part;
 465
 466	if (part) {
 467		bt->start_lba = part->start_sect;
 468		bt->end_lba = part->start_sect + part->nr_sects;
 469	} else {
 470		bt->start_lba = 0;
 471		bt->end_lba = -1ULL;
 472	}
 473}
 474
 475/*
 476 * Setup everything required to start tracing
 477 */
 478static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 479			      struct block_device *bdev,
 480			      struct blk_user_trace_setup *buts)
 481{
 482	struct blk_trace *bt = NULL;
 483	struct dentry *dir = NULL;
 484	int ret;
 485
 486	lockdep_assert_held(&q->debugfs_mutex);
 487
 488	if (!buts->buf_size || !buts->buf_nr)
 489		return -EINVAL;
 490
 491	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
 492	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
 493
 494	/*
 495	 * some device names contain path separators - convert the slashes
 496	 * to underscores so this works as expected
 497	 */
 498	strreplace(buts->name, '/', '_');
 499
 500	/*
 501	 * bdev can be NULL, as with scsi-generic; this is as helpful as
 502	 * we can be.
 503	 */
 504	if (rcu_dereference_protected(q->blk_trace,
 505				      lockdep_is_held(&q->debugfs_mutex))) {
 506		pr_warn("Concurrent blktraces are not allowed on %s\n",
 507			buts->name);
 508		return -EBUSY;
 509	}
 510
 511	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
 512	if (!bt)
 513		return -ENOMEM;
 514
 515	ret = -ENOMEM;
 516	bt->sequence = alloc_percpu(unsigned long);
 517	if (!bt->sequence)
 518		goto err;
 519
 520	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 521	if (!bt->msg_data)
 522		goto err;
 523
 524	/*
 525	 * When tracing the whole disk, reuse the existing debugfs directory
 526	 * created by the block layer on init. For partition block devices
 527	 * and scsi-generic block devices we create a temporary new debugfs
 528	 * directory that will be removed once the trace ends.
 529	 */
 530	if (bdev && bdev == bdev->bd_contains)
 531		dir = q->debugfs_dir;
 532	else
 533		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
 534
 535	/*
 536	 * As blktrace relies on debugfs for its interface the debugfs directory
 537	 * is required, contrary to the usual mantra of not checking for debugfs
 538	 * files or directories.
 539	 */
 540	if (IS_ERR_OR_NULL(dir)) {
 541		pr_warn("debugfs_dir not present for %s so skipping\n",
 542			buts->name);
 543		ret = -ENOENT;
 544		goto err;
 545	}
 546
 547	bt->dev = dev;
 548	atomic_set(&bt->dropped, 0);
 549	INIT_LIST_HEAD(&bt->running_list);
 550
 551	ret = -EIO;
 552	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 553					       &blk_dropped_fops);
 554
 555	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 556
 557	bt->rchan = relay_open("trace", dir, buts->buf_size,
 558				buts->buf_nr, &blk_relay_callbacks, bt);
 559	if (!bt->rchan)
 560		goto err;
 561
 562	bt->act_mask = buts->act_mask;
 563	if (!bt->act_mask)
 564		bt->act_mask = (u16) -1;
 565
 566	blk_trace_setup_lba(bt, bdev);
 567
 568	/* overwrite with user settings */
 569	if (buts->start_lba)
 570		bt->start_lba = buts->start_lba;
 571	if (buts->end_lba)
 572		bt->end_lba = buts->end_lba;
 573
 574	bt->pid = buts->pid;
 575	bt->trace_state = Blktrace_setup;
 576
 577	rcu_assign_pointer(q->blk_trace, bt);
 578	get_probe_ref();
 579
 580	ret = 0;
 581err:
 582	if (ret)
 583		blk_trace_free(bt);
 584	return ret;
 585}
 586
 587static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 588			     struct block_device *bdev, char __user *arg)
 589{
 590	struct blk_user_trace_setup buts;
 591	int ret;
 592
 593	ret = copy_from_user(&buts, arg, sizeof(buts));
 594	if (ret)
 595		return -EFAULT;
 596
 597	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 598	if (ret)
 599		return ret;
 600
 601	if (copy_to_user(arg, &buts, sizeof(buts))) {
 602		__blk_trace_remove(q);
 603		return -EFAULT;
 604	}
 605	return 0;
 606}
 607
 608int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 609		    struct block_device *bdev,
 610		    char __user *arg)
 611{
 612	int ret;
 613
 614	mutex_lock(&q->debugfs_mutex);
 615	ret = __blk_trace_setup(q, name, dev, bdev, arg);
 616	mutex_unlock(&q->debugfs_mutex);
 617
 618	return ret;
 619}
 620EXPORT_SYMBOL_GPL(blk_trace_setup);
 621
 622#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 623static int compat_blk_trace_setup(struct request_queue *q, char *name,
 624				  dev_t dev, struct block_device *bdev,
 625				  char __user *arg)
 626{
 627	struct blk_user_trace_setup buts;
 628	struct compat_blk_user_trace_setup cbuts;
 629	int ret;
 630
 631	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 632		return -EFAULT;
 633
 634	buts = (struct blk_user_trace_setup) {
 635		.act_mask = cbuts.act_mask,
 636		.buf_size = cbuts.buf_size,
 637		.buf_nr = cbuts.buf_nr,
 638		.start_lba = cbuts.start_lba,
 639		.end_lba = cbuts.end_lba,
 640		.pid = cbuts.pid,
 641	};
 642
 643	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 644	if (ret)
 645		return ret;
 646
 647	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 648		__blk_trace_remove(q);
 649		return -EFAULT;
 650	}
 651
 652	return 0;
 653}
 654#endif
 655
 656static int __blk_trace_startstop(struct request_queue *q, int start)
 657{
 658	int ret;
 659	struct blk_trace *bt;
 660
 661	bt = rcu_dereference_protected(q->blk_trace,
 662				       lockdep_is_held(&q->debugfs_mutex));
 663	if (bt == NULL)
 664		return -EINVAL;
 665
 666	/*
 667	 * For starting a trace, we can transition from a setup or stopped
 668	 * trace. For stopping a trace, the state must be running.
 669	 */
 670	ret = -EINVAL;
 671	if (start) {
 672		if (bt->trace_state == Blktrace_setup ||
 673		    bt->trace_state == Blktrace_stopped) {
 674			blktrace_seq++;
 675			smp_mb();
 676			bt->trace_state = Blktrace_running;
 677			spin_lock_irq(&running_trace_lock);
 678			list_add(&bt->running_list, &running_trace_list);
 679			spin_unlock_irq(&running_trace_lock);
 680
 681			trace_note_time(bt);
 682			ret = 0;
 683		}
 684	} else {
 685		if (bt->trace_state == Blktrace_running) {
 686			bt->trace_state = Blktrace_stopped;
 687			spin_lock_irq(&running_trace_lock);
 688			list_del_init(&bt->running_list);
 689			spin_unlock_irq(&running_trace_lock);
 690			relay_flush(bt->rchan);
 691			ret = 0;
 692		}
 693	}
 694
 695	return ret;
 696}
 697
 698int blk_trace_startstop(struct request_queue *q, int start)
 699{
 700	int ret;
 701
 702	mutex_lock(&q->debugfs_mutex);
 703	ret = __blk_trace_startstop(q, start);
 704	mutex_unlock(&q->debugfs_mutex);
 705
 706	return ret;
 707}
 708EXPORT_SYMBOL_GPL(blk_trace_startstop);
 709
 710/*
 711 * When reading or writing the blktrace sysfs files, the references to the
 712 * opened sysfs or device files should prevent the underlying block device
 713 * from being removed. So no further delete protection is really needed.
 714 */
 715
 716/**
 717 * blk_trace_ioctl: - handle the ioctls associated with tracing
 718 * @bdev:	the block device
 719 * @cmd:	the ioctl cmd
 720 * @arg:	the argument data, if any
 721 *
 722 **/
 723int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 724{
 725	struct request_queue *q;
 726	int ret, start = 0;
 727	char b[BDEVNAME_SIZE];
 728
 729	q = bdev_get_queue(bdev);
 730	if (!q)
 731		return -ENXIO;
 732
 733	mutex_lock(&q->debugfs_mutex);
 734
 735	switch (cmd) {
 736	case BLKTRACESETUP:
 737		bdevname(bdev, b);
 738		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 739		break;
 740#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 741	case BLKTRACESETUP32:
 742		bdevname(bdev, b);
 743		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 744		break;
 745#endif
 746	case BLKTRACESTART:
 747		start = 1;
 748		fallthrough;
 749	case BLKTRACESTOP:
 750		ret = __blk_trace_startstop(q, start);
 751		break;
 752	case BLKTRACETEARDOWN:
 753		ret = __blk_trace_remove(q);
 754		break;
 755	default:
 756		ret = -ENOTTY;
 757		break;
 758	}
 759
 760	mutex_unlock(&q->debugfs_mutex);
 761	return ret;
 762}
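/*
 * User-space sketch (exposition only; mirrors what the blktrace(8) tool
 * does - the buffer sizes are arbitrary examples):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	... read the per-cpu relay files, e.g.
 *	    /sys/kernel/debug/block/sda/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 *
 * A zero act_mask is widened to "all actions" by do_blk_trace_setup().
 */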
 763
 764/**
 765 * blk_trace_shutdown: - stop and cleanup trace structures
 766 * @q:    the request queue associated with the device
 767 *
 768 **/
 769void blk_trace_shutdown(struct request_queue *q)
 770{
 771	mutex_lock(&q->debugfs_mutex);
 772	if (rcu_dereference_protected(q->blk_trace,
 773				      lockdep_is_held(&q->debugfs_mutex))) {
 774		__blk_trace_startstop(q, 0);
 775		__blk_trace_remove(q);
 776	}
 777
 778	mutex_unlock(&q->debugfs_mutex);
 779}
 780
 781#ifdef CONFIG_BLK_CGROUP
 782static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 783{
 784	struct blk_trace *bt;
 785
 786	/* We don't use the 'bt' value here except as an optimization... */
 787	bt = rcu_dereference_protected(q->blk_trace, 1);
 788	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 789		return 0;
 790
 791	if (!bio->bi_blkg)
 792		return 0;
 793	return cgroup_id(bio_blkcg(bio)->css.cgroup);
 794}
 795#else
 796u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 797{
 798	return 0;
 799}
 800#endif
 801
 802static u64
 803blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
 804{
 805	if (!rq->bio)
 806		return 0;
 807	/* Use the first bio */
 808	return blk_trace_bio_get_cgid(q, rq->bio);
 809}
 810
 811/*
 812 * blktrace probes
 813 */
 814
 815/**
 816 * blk_add_trace_rq - Add a trace for a request oriented action
 817 * @rq:		the source request
 818 * @error:	return status to log
 819 * @nr_bytes:	number of completed bytes
 820 * @what:	the action
 821 * @cgid:	the cgroup info
 822 *
 823 * Description:
 824 *     Records an action against a request. Will log the bio offset + size.
 825 *
 826 **/
 827static void blk_add_trace_rq(struct request *rq, int error,
 828			     unsigned int nr_bytes, u32 what, u64 cgid)
 829{
 830	struct blk_trace *bt;
 831
 832	rcu_read_lock();
 833	bt = rcu_dereference(rq->q->blk_trace);
 834	if (likely(!bt)) {
 835		rcu_read_unlock();
 836		return;
 837	}
 838
 839	if (blk_rq_is_passthrough(rq))
 840		what |= BLK_TC_ACT(BLK_TC_PC);
 841	else
 842		what |= BLK_TC_ACT(BLK_TC_FS);
 843
 844	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
 845			rq->cmd_flags, what, error, 0, NULL, cgid);
 846	rcu_read_unlock();
 847}
 848
 849static void blk_add_trace_rq_insert(void *ignore,
 850				    struct request_queue *q, struct request *rq)
 851{
 852	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
 853			 blk_trace_request_get_cgid(q, rq));
 854}
 855
 856static void blk_add_trace_rq_issue(void *ignore,
 857				   struct request_queue *q, struct request *rq)
 858{
 859	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
 860			 blk_trace_request_get_cgid(q, rq));
 861}
 862
 863static void blk_add_trace_rq_merge(void *ignore,
 864				   struct request_queue *q, struct request *rq)
 865{
 866	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
 867			 blk_trace_request_get_cgid(q, rq));
 868}
 869
 870static void blk_add_trace_rq_requeue(void *ignore,
 871				     struct request_queue *q,
 872				     struct request *rq)
 873{
 874	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
 875			 blk_trace_request_get_cgid(q, rq));
 876}
 877
 878static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 879			int error, unsigned int nr_bytes)
 880{
 881	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
 882			 blk_trace_request_get_cgid(rq->q, rq));
 883}
 884
 885/**
 886 * blk_add_trace_bio - Add a trace for a bio oriented action
 887 * @q:		queue the io is for
 888 * @bio:	the source bio
 889 * @what:	the action
 890 * @error:	error, if any
 891 *
 892 * Description:
 893 *     Records an action against a bio. Will log the bio offset + size.
 894 *
 895 **/
 896static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 897			      u32 what, int error)
 898{
 899	struct blk_trace *bt;
 900
 901	rcu_read_lock();
 902	bt = rcu_dereference(q->blk_trace);
 903	if (likely(!bt)) {
 904		rcu_read_unlock();
 905		return;
 906	}
 907
 908	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 909			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
 910			blk_trace_bio_get_cgid(q, bio));
 911	rcu_read_unlock();
 912}
 913
 914static void blk_add_trace_bio_bounce(void *ignore,
 915				     struct request_queue *q, struct bio *bio)
 916{
 917	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 918}
 919
 920static void blk_add_trace_bio_complete(void *ignore,
 921				       struct request_queue *q, struct bio *bio)
 922{
 923	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
 924			  blk_status_to_errno(bio->bi_status));
 925}
 926
 927static void blk_add_trace_bio_backmerge(void *ignore,
 928					struct request_queue *q,
 929					struct request *rq,
 930					struct bio *bio)
 931{
 932	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 933}
 934
 935static void blk_add_trace_bio_frontmerge(void *ignore,
 936					 struct request_queue *q,
 937					 struct request *rq,
 938					 struct bio *bio)
 939{
 940	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 941}
 942
 943static void blk_add_trace_bio_queue(void *ignore,
 944				    struct request_queue *q, struct bio *bio)
 945{
 946	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 947}
 948
 949static void blk_add_trace_getrq(void *ignore,
 950				struct request_queue *q,
 951				struct bio *bio, int rw)
 952{
 953	if (bio)
 954		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 955	else {
 956		struct blk_trace *bt;
 957
 958		rcu_read_lock();
 959		bt = rcu_dereference(q->blk_trace);
 960		if (bt)
 961			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
 962					NULL, 0);
 963		rcu_read_unlock();
 964	}
 965}
 966
 967
 968static void blk_add_trace_sleeprq(void *ignore,
 969				  struct request_queue *q,
 970				  struct bio *bio, int rw)
 971{
 972	if (bio)
 973		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 974	else {
 975		struct blk_trace *bt;
 976
 977		rcu_read_lock();
 978		bt = rcu_dereference(q->blk_trace);
 979		if (bt)
 980			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
 981					0, 0, NULL, 0);
 982		rcu_read_unlock();
 983	}
 984}
 985
 986static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 987{
 988	struct blk_trace *bt;
 989
 990	rcu_read_lock();
 991	bt = rcu_dereference(q->blk_trace);
 992	if (bt)
 993		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 994	rcu_read_unlock();
 995}
 996
 997static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 998				    unsigned int depth, bool explicit)
 999{
1000	struct blk_trace *bt;
1001
1002	rcu_read_lock();
1003	bt = rcu_dereference(q->blk_trace);
1004	if (bt) {
1005		__be64 rpdu = cpu_to_be64(depth);
1006		u32 what;
1007
1008		if (explicit)
1009			what = BLK_TA_UNPLUG_IO;
1010		else
1011			what = BLK_TA_UNPLUG_TIMER;
1012
1013		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
1014	}
1015	rcu_read_unlock();
1016}
1017
1018static void blk_add_trace_split(void *ignore,
1019				struct request_queue *q, struct bio *bio,
1020				unsigned int pdu)
1021{
1022	struct blk_trace *bt;
1023
1024	rcu_read_lock();
1025	bt = rcu_dereference(q->blk_trace);
1026	if (bt) {
1027		__be64 rpdu = cpu_to_be64(pdu);
1028
1029		__blk_add_trace(bt, bio->bi_iter.bi_sector,
1030				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
1031				BLK_TA_SPLIT,
1032				blk_status_to_errno(bio->bi_status),
1033				sizeof(rpdu), &rpdu,
1034				blk_trace_bio_get_cgid(q, bio));
1035	}
1036	rcu_read_unlock();
1037}
1038
1039/**
1040 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
1041 * @ignore:	trace callback data parameter (not used)
1042 * @q:		queue the io is for
1043 * @bio:	the source bio
1044 * @dev:	target device
1045 * @from:	source sector
1046 *
1047 * Description:
1048 *     A device mapper or raid target sometimes needs to split a bio because
1049 *     it spans a stripe (or similar). Add a trace for that action.
1050 *
1051 **/
1052static void blk_add_trace_bio_remap(void *ignore,
1053				    struct request_queue *q, struct bio *bio,
1054				    dev_t dev, sector_t from)
1055{
1056	struct blk_trace *bt;
1057	struct blk_io_trace_remap r;
1058
1059	rcu_read_lock();
1060	bt = rcu_dereference(q->blk_trace);
1061	if (likely(!bt)) {
1062		rcu_read_unlock();
1063		return;
1064	}
1065
1066	r.device_from = cpu_to_be32(dev);
1067	r.device_to   = cpu_to_be32(bio_dev(bio));
1068	r.sector_from = cpu_to_be64(from);
1069
1070	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1071			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
1072			blk_status_to_errno(bio->bi_status),
1073			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1074	rcu_read_unlock();
1075}
1076
1077/**
1078 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1079 * @ignore:	trace callback data parameter (not used)
1080 * @q:		queue the io is for
1081 * @rq:		the source request
1082 * @dev:	target device
1083 * @from:	source sector
1084 *
1085 * Description:
1086 *     Device mapper remaps request to other devices.
1087 *     Add a trace for that action.
1088 *
1089 **/
1090static void blk_add_trace_rq_remap(void *ignore,
1091				   struct request_queue *q,
1092				   struct request *rq, dev_t dev,
1093				   sector_t from)
1094{
1095	struct blk_trace *bt;
1096	struct blk_io_trace_remap r;
1097
1098	rcu_read_lock();
1099	bt = rcu_dereference(q->blk_trace);
1100	if (likely(!bt)) {
1101		rcu_read_unlock();
1102		return;
1103	}
1104
1105	r.device_from = cpu_to_be32(dev);
1106	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
1107	r.sector_from = cpu_to_be64(from);
1108
1109	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1110			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
1111			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
1112	rcu_read_unlock();
1113}
1114
1115/**
1116 * blk_add_driver_data - Add binary message with driver-specific data
1117 * @q:		queue the io is for
1118 * @rq:		io request
1119 * @data:	driver-specific data
1120 * @len:	length of driver-specific data
1121 *
1122 * Description:
1123 *     Some drivers might want to write driver-specific data per request.
1124 *
1125 **/
1126void blk_add_driver_data(struct request_queue *q,
1127			 struct request *rq,
1128			 void *data, size_t len)
1129{
1130	struct blk_trace *bt;
1131
1132	rcu_read_lock();
1133	bt = rcu_dereference(q->blk_trace);
1134	if (likely(!bt)) {
1135		rcu_read_unlock();
1136		return;
1137	}
1138
1139	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
1140				BLK_TA_DRV_DATA, 0, len, data,
1141				blk_trace_request_get_cgid(q, rq));
1142	rcu_read_unlock();
1143}
1144EXPORT_SYMBOL_GPL(blk_add_driver_data);
1145
1146static void blk_register_tracepoints(void)
1147{
1148	int ret;
1149
1150	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1151	WARN_ON(ret);
1152	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1153	WARN_ON(ret);
1154	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1155	WARN_ON(ret);
1156	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1157	WARN_ON(ret);
1158	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1159	WARN_ON(ret);
1160	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1161	WARN_ON(ret);
1162	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1163	WARN_ON(ret);
1164	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1165	WARN_ON(ret);
1166	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1167	WARN_ON(ret);
1168	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1169	WARN_ON(ret);
1170	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1171	WARN_ON(ret);
1172	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1173	WARN_ON(ret);
1174	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1175	WARN_ON(ret);
1176	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1177	WARN_ON(ret);
1178	ret = register_trace_block_split(blk_add_trace_split, NULL);
1179	WARN_ON(ret);
1180	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1181	WARN_ON(ret);
1182	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1183	WARN_ON(ret);
1184}
1185
1186static void blk_unregister_tracepoints(void)
1187{
1188	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1189	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1190	unregister_trace_block_split(blk_add_trace_split, NULL);
1191	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1192	unregister_trace_block_plug(blk_add_trace_plug, NULL);
1193	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1194	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1195	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1196	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1197	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1198	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1199	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1200	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1201	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1202	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1203	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1204	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1205
1206	tracepoint_synchronize_unregister();
1207}
1208
1209/*
1210 * struct blk_io_tracer formatting routines
1211 */
1212
1213static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1214{
1215	int i = 0;
1216	int tc = t->action >> BLK_TC_SHIFT;
1217
1218	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1219		rwbs[i++] = 'N';
1220		goto out;
1221	}
1222
1223	if (tc & BLK_TC_FLUSH)
1224		rwbs[i++] = 'F';
1225
1226	if (tc & BLK_TC_DISCARD)
1227		rwbs[i++] = 'D';
1228	else if (tc & BLK_TC_WRITE)
1229		rwbs[i++] = 'W';
1230	else if (t->bytes)
1231		rwbs[i++] = 'R';
1232	else
1233		rwbs[i++] = 'N';
1234
1235	if (tc & BLK_TC_FUA)
1236		rwbs[i++] = 'F';
1237	if (tc & BLK_TC_AHEAD)
1238		rwbs[i++] = 'A';
1239	if (tc & BLK_TC_SYNC)
1240		rwbs[i++] = 'S';
1241	if (tc & BLK_TC_META)
1242		rwbs[i++] = 'M';
1243out:
1244	rwbs[i] = '\0';
1245}
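/*
 * Exposition note (not part of the kernel source): an event whose action
 * carries BLK_TC_WRITE and BLK_TC_SYNC in the bits above BLK_TC_SHIFT
 * decodes to "WS"; a notify message always decodes to "N".
 */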
1246
1247static inline
1248const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1249{
1250	return (const struct blk_io_trace *)ent;
1251}
1252
1253static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1254{
1255	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
1256}
1257
1258static inline u64 t_cgid(const struct trace_entry *ent)
1259{
1260	return *(u64 *)(te_blk_io_trace(ent) + 1);
1261}
1262
1263static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1264{
1265	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
1266}
1267
1268static inline u32 t_action(const struct trace_entry *ent)
1269{
1270	return te_blk_io_trace(ent)->action;
1271}
1272
1273static inline u32 t_bytes(const struct trace_entry *ent)
1274{
1275	return te_blk_io_trace(ent)->bytes;
1276}
1277
1278static inline u32 t_sec(const struct trace_entry *ent)
1279{
1280	return te_blk_io_trace(ent)->bytes >> 9;
1281}
1282
1283static inline unsigned long long t_sector(const struct trace_entry *ent)
1284{
1285	return te_blk_io_trace(ent)->sector;
1286}
1287
1288static inline __u16 t_error(const struct trace_entry *ent)
1289{
1290	return te_blk_io_trace(ent)->error;
1291}
1292
1293static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1294{
1295	const __be64 *val = pdu_start(ent, has_cg);
1296	return be64_to_cpu(*val);
1297}
1298
1299typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1300	bool has_cg);
1301
1302static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1303	bool has_cg)
1304{
1305	char rwbs[RWBS_LEN];
1306	unsigned long long ts  = iter->ts;
1307	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1308	unsigned secs	       = (unsigned long)ts;
1309	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1310
1311	fill_rwbs(rwbs, t);
1312
1313	trace_seq_printf(&iter->seq,
1314			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1315			 MAJOR(t->device), MINOR(t->device), iter->cpu,
1316			 secs, nsec_rem, iter->ent->pid, act, rwbs);
1317}
1318
1319static void blk_log_action(struct trace_iterator *iter, const char *act,
1320	bool has_cg)
1321{
1322	char rwbs[RWBS_LEN];
1323	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1324
1325	fill_rwbs(rwbs, t);
1326	if (has_cg) {
1327		u64 id = t_cgid(iter->ent);
1328
1329		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1330			char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1331
1332			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1333				sizeof(blkcg_name_buf));
1334			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1335				 MAJOR(t->device), MINOR(t->device),
1336				 blkcg_name_buf, act, rwbs);
1337		} else {
1338			/*
1339			 * The cgid portion used to be "INO,GEN".  Userland
1340			 * builds a FILEID_INO32_GEN fid out of them and
1341			 * opens the cgroup using open_by_handle_at(2).
1342			 * While 32bit ino setups are still the same, 64bit
1343			 * ones now use the 64bit ino as the whole ID and
1344			 * no longer use generation.
1345			 *
1346			 * Regardless of the content, always output
1347			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
1348			 * be mapped back to @id on both 64 and 32bit ino
1349			 * setups.  See __kernfs_fh_to_dentry().
1350			 */
1351			trace_seq_printf(&iter->seq,
1352				 "%3d,%-3d %llx,%-llx %2s %3s ",
1353				 MAJOR(t->device), MINOR(t->device),
1354				 id & U32_MAX, id >> 32, act, rwbs);
1355		}
1356	} else
1357		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1358				 MAJOR(t->device), MINOR(t->device), act, rwbs);
1359}
1360
1361static void blk_log_dump_pdu(struct trace_seq *s,
1362	const struct trace_entry *ent, bool has_cg)
1363{
1364	const unsigned char *pdu_buf;
1365	int pdu_len;
1366	int i, end;
1367
1368	pdu_buf = pdu_start(ent, has_cg);
1369	pdu_len = pdu_real_len(ent, has_cg);
1370
1371	if (!pdu_len)
1372		return;
1373
1374	/* find the last zero that needs to be printed */
1375	for (end = pdu_len - 1; end >= 0; end--)
1376		if (pdu_buf[end])
1377			break;
1378	end++;
1379
1380	trace_seq_putc(s, '(');
1381
1382	for (i = 0; i < pdu_len; i++) {
1383
1384		trace_seq_printf(s, "%s%02x",
1385				 i == 0 ? "" : " ", pdu_buf[i]);
1386
1387		/*
1388		 * stop when the rest is just zeroes and indicate so
1389		 * with a ".." appended
1390		 */
1391		if (i == end && end != pdu_len - 1) {
1392			trace_seq_puts(s, " ..) ");
1393			return;
1394		}
1395	}
1396
1397	trace_seq_puts(s, ") ");
1398}
1399
1400static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1401{
1402	char cmd[TASK_COMM_LEN];
1403
1404	trace_find_cmdline(ent->pid, cmd);
1405
1406	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1407		trace_seq_printf(s, "%u ", t_bytes(ent));
1408		blk_log_dump_pdu(s, ent, has_cg);
1409		trace_seq_printf(s, "[%s]\n", cmd);
1410	} else {
1411		if (t_sec(ent))
1412			trace_seq_printf(s, "%llu + %u [%s]\n",
1413						t_sector(ent), t_sec(ent), cmd);
1414		else
1415			trace_seq_printf(s, "[%s]\n", cmd);
1416	}
1417}
1418
1419static void blk_log_with_error(struct trace_seq *s,
1420			      const struct trace_entry *ent, bool has_cg)
1421{
1422	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1423		blk_log_dump_pdu(s, ent, has_cg);
1424		trace_seq_printf(s, "[%d]\n", t_error(ent));
1425	} else {
1426		if (t_sec(ent))
1427			trace_seq_printf(s, "%llu + %u [%d]\n",
1428					 t_sector(ent),
1429					 t_sec(ent), t_error(ent));
1430		else
1431			trace_seq_printf(s, "%llu [%d]\n",
1432					 t_sector(ent), t_error(ent));
1433	}
1434}
1435
1436static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1437{
1438	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1439
1440	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1441			 t_sector(ent), t_sec(ent),
1442			 MAJOR(be32_to_cpu(__r->device_from)),
1443			 MINOR(be32_to_cpu(__r->device_from)),
1444			 be64_to_cpu(__r->sector_from));
1445}
1446
1447static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1448{
1449	char cmd[TASK_COMM_LEN];
1450
1451	trace_find_cmdline(ent->pid, cmd);
1452
1453	trace_seq_printf(s, "[%s]\n", cmd);
1454}
1455
1456static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1457{
1458	char cmd[TASK_COMM_LEN];
1459
1460	trace_find_cmdline(ent->pid, cmd);
1461
1462	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1463}
1464
1465static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1466{
1467	char cmd[TASK_COMM_LEN];
1468
1469	trace_find_cmdline(ent->pid, cmd);
1470
1471	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1472			 get_pdu_int(ent, has_cg), cmd);
1473}
1474
1475static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1476			bool has_cg)
1477{
1478
1479	trace_seq_putmem(s, pdu_start(ent, has_cg),
1480		pdu_real_len(ent, has_cg));
1481	trace_seq_putc(s, '\n');
1482}
1483
1484/*
1485 * struct tracer operations
1486 */
1487
1488static void blk_tracer_print_header(struct seq_file *m)
1489{
1490	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1491		return;
1492	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1493		    "#  |     |     |           |   |   |\n");
1494}
1495
1496static void blk_tracer_start(struct trace_array *tr)
1497{
1498	blk_tracer_enabled = true;
1499}
1500
1501static int blk_tracer_init(struct trace_array *tr)
1502{
1503	blk_tr = tr;
1504	blk_tracer_start(tr);
1505	return 0;
1506}
1507
1508static void blk_tracer_stop(struct trace_array *tr)
1509{
1510	blk_tracer_enabled = false;
1511}
1512
1513static void blk_tracer_reset(struct trace_array *tr)
1514{
1515	blk_tracer_stop(tr);
1516}
1517
1518static const struct {
1519	const char *act[2];
1520	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
1521			    bool has_cg);
1522} what2act[] = {
1523	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
1524	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
1525	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
1526	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
1527	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
1528	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
1529	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
1530	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
1531	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
1532	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
1533	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
1534	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
1535	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
1536	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
1537	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
1538};
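/*
 * Exposition note (not part of the kernel source): a BLK_TA_QUEUE event,
 * for example, is rendered with the short action "Q" (or "queue" when the
 * verbose trace flag is set) followed by blk_log_generic()'s
 * "<sector> + <nr_sectors> [<comm>]" body.
 */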
1539
1540static enum print_line_t print_one_line(struct trace_iterator *iter,
1541					bool classic)
1542{
1543	struct trace_array *tr = iter->tr;
1544	struct trace_seq *s = &iter->seq;
1545	const struct blk_io_trace *t;
1546	u16 what;
1547	bool long_act;
1548	blk_log_action_t *log_action;
1549	bool has_cg;
1550
1551	t	   = te_blk_io_trace(iter->ent);
1552	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1553	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1554	log_action = classic ? &blk_log_action_classic : &blk_log_action;
1555	has_cg	   = t->action & __BLK_TA_CGROUP;
1556
1557	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1558		log_action(iter, long_act ? "message" : "m", has_cg);
1559		blk_log_msg(s, iter->ent, has_cg);
1560		return trace_handle_return(s);
1561	}
1562
1563	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1564		trace_seq_printf(s, "Unknown action %x\n", what);
1565	else {
1566		log_action(iter, what2act[what].act[long_act], has_cg);
1567		what2act[what].print(s, iter->ent, has_cg);
1568	}
1569
1570	return trace_handle_return(s);
1571}
1572
1573static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1574					       int flags, struct trace_event *event)
1575{
1576	return print_one_line(iter, false);
1577}
1578
1579static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1580{
1581	struct trace_seq *s = &iter->seq;
1582	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1583	const int offset = offsetof(struct blk_io_trace, sector);
1584	struct blk_io_trace old = {
1585		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1586		.time     = iter->ts,
1587	};
1588
1589	trace_seq_putmem(s, &old, offset);
1590	trace_seq_putmem(s, &t->sector,
1591			 sizeof(old) - offset + t->pdu_len);
1592}
1593
1594static enum print_line_t
1595blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1596			     struct trace_event *event)
1597{
1598	blk_trace_synthesize_old_trace(iter);
1599
1600	return trace_handle_return(&iter->seq);
1601}
1602
1603static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1604{
1605	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1606		return TRACE_TYPE_UNHANDLED;
1607
1608	return print_one_line(iter, true);
1609}
1610
1611static int
1612blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1613{
1614	/* don't output context-info for blk_classic output */
1615	if (bit == TRACE_BLK_OPT_CLASSIC) {
1616		if (set)
1617			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1618		else
1619			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1620	}
1621	return 0;
1622}
1623
1624static struct tracer blk_tracer __read_mostly = {
1625	.name		= "blk",
1626	.init		= blk_tracer_init,
1627	.reset		= blk_tracer_reset,
1628	.start		= blk_tracer_start,
1629	.stop		= blk_tracer_stop,
1630	.print_header	= blk_tracer_print_header,
1631	.print_line	= blk_tracer_print_line,
1632	.flags		= &blk_tracer_flags,
1633	.set_flag	= blk_tracer_set_flag,
1634};
1635
1636static struct trace_event_functions trace_blk_event_funcs = {
1637	.trace		= blk_trace_event_print,
1638	.binary		= blk_trace_event_print_binary,
1639};
1640
1641static struct trace_event trace_blk_event = {
1642	.type		= TRACE_BLK,
1643	.funcs		= &trace_blk_event_funcs,
1644};
1645
1646static int __init init_blk_tracer(void)
1647{
1648	if (!register_trace_event(&trace_blk_event)) {
1649		pr_warn("Warning: could not register block events\n");
1650		return 1;
1651	}
1652
1653	if (register_tracer(&blk_tracer) != 0) {
1654		pr_warn("Warning: could not register the block tracer\n");
1655		unregister_trace_event(&trace_blk_event);
1656		return 1;
1657	}
1658
1659	return 0;
1660}
1661
1662device_initcall(init_blk_tracer);
1663
1664static int blk_trace_remove_queue(struct request_queue *q)
1665{
1666	struct blk_trace *bt;
1667
1668	bt = rcu_replace_pointer(q->blk_trace, NULL,
1669				 lockdep_is_held(&q->debugfs_mutex));
1670	if (bt == NULL)
1671		return -EINVAL;
1672
 1672
1673	put_probe_ref();
1674	synchronize_rcu();
1675	blk_trace_free(bt);
1676	return 0;
1677}
1678
1679/*
1680 * Setup everything required to start tracing
1681 */
1682static int blk_trace_setup_queue(struct request_queue *q,
1683				 struct block_device *bdev)
1684{
1685	struct blk_trace *bt = NULL;
1686	int ret = -ENOMEM;
1687
1688	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1689	if (!bt)
1690		return -ENOMEM;
1691
1692	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1693	if (!bt->msg_data)
1694		goto free_bt;
1695
1696	bt->dev = bdev->bd_dev;
1697	bt->act_mask = (u16)-1;
1698
1699	blk_trace_setup_lba(bt, bdev);
1700
1701	rcu_assign_pointer(q->blk_trace, bt);
1702	get_probe_ref();
1703	return 0;
1704
1705free_bt:
1706	blk_trace_free(bt);
1707	return ret;
1708}
1709
1710/*
1711 * sysfs interface to enable and configure tracing
1712 */
1713
1714static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1715					 struct device_attribute *attr,
1716					 char *buf);
1717static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1718					  struct device_attribute *attr,
1719					  const char *buf, size_t count);
1720#define BLK_TRACE_DEVICE_ATTR(_name) \
1721	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1722		    sysfs_blk_trace_attr_show, \
1723		    sysfs_blk_trace_attr_store)
1724
1725static BLK_TRACE_DEVICE_ATTR(enable);
1726static BLK_TRACE_DEVICE_ATTR(act_mask);
1727static BLK_TRACE_DEVICE_ATTR(pid);
1728static BLK_TRACE_DEVICE_ATTR(start_lba);
1729static BLK_TRACE_DEVICE_ATTR(end_lba);
1730
1731static struct attribute *blk_trace_attrs[] = {
1732	&dev_attr_enable.attr,
1733	&dev_attr_act_mask.attr,
1734	&dev_attr_pid.attr,
1735	&dev_attr_start_lba.attr,
1736	&dev_attr_end_lba.attr,
1737	NULL
1738};
1739
1740struct attribute_group blk_trace_attr_group = {
1741	.name  = "trace",
1742	.attrs = blk_trace_attrs,
1743};
1744
1745static const struct {
1746	int mask;
1747	const char *str;
1748} mask_maps[] = {
1749	{ BLK_TC_READ,		"read"		},
1750	{ BLK_TC_WRITE,		"write"		},
1751	{ BLK_TC_FLUSH,		"flush"		},
1752	{ BLK_TC_SYNC,		"sync"		},
1753	{ BLK_TC_QUEUE,		"queue"		},
1754	{ BLK_TC_REQUEUE,	"requeue"	},
1755	{ BLK_TC_ISSUE,		"issue"		},
1756	{ BLK_TC_COMPLETE,	"complete"	},
1757	{ BLK_TC_FS,		"fs"		},
1758	{ BLK_TC_PC,		"pc"		},
1759	{ BLK_TC_NOTIFY,	"notify"	},
1760	{ BLK_TC_AHEAD,		"ahead"		},
1761	{ BLK_TC_META,		"meta"		},
1762	{ BLK_TC_DISCARD,	"discard"	},
1763	{ BLK_TC_DRV_DATA,	"drv_data"	},
1764	{ BLK_TC_FUA,		"fua"		},
1765};
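/*
 * Exposition note (not part of the kernel source): these strings form the
 * act_mask syntax of the sysfs interface, e.g.
 *
 *	echo read,write,sync > /sys/block/sda/trace/act_mask
 *
 * restricts tracing to read, write and sync events ("sda" is just an
 * example device).
 */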
1766
1767static int blk_trace_str2mask(const char *str)
1768{
1769	int i;
1770	int mask = 0;
1771	char *buf, *s, *token;
1772
1773	buf = kstrdup(str, GFP_KERNEL);
1774	if (buf == NULL)
1775		return -ENOMEM;
1776	s = strstrip(buf);
1777
1778	while (1) {
1779		token = strsep(&s, ",");
1780		if (token == NULL)
1781			break;
1782
1783		if (*token == '\0')
1784			continue;
1785
1786		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1787			if (strcasecmp(token, mask_maps[i].str) == 0) {
1788				mask |= mask_maps[i].mask;
1789				break;
1790			}
1791		}
1792		if (i == ARRAY_SIZE(mask_maps)) {
1793			mask = -EINVAL;
1794			break;
1795		}
1796	}
1797	kfree(buf);
1798
1799	return mask;
1800}
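/*
 * Exposition note (not part of the kernel source): for example,
 * blk_trace_str2mask("read,sync") returns BLK_TC_READ | BLK_TC_SYNC,
 * while any unrecognized token fails the whole parse with -EINVAL.
 */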
1801
1802static ssize_t blk_trace_mask2str(char *buf, int mask)
1803{
1804	int i;
1805	char *p = buf;
1806
1807	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1808		if (mask & mask_maps[i].mask) {
1809			p += sprintf(p, "%s%s",
1810				    (p == buf) ? "" : ",", mask_maps[i].str);
1811		}
1812	}
1813	*p++ = '\n';
1814
1815	return p - buf;
1816}
1817
1818static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1819{
1820	if (bdev->bd_disk == NULL)
1821		return NULL;
1822
1823	return bdev_get_queue(bdev);
1824}
1825
1826static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1827					 struct device_attribute *attr,
1828					 char *buf)
1829{
1830	struct hd_struct *p = dev_to_part(dev);
1831	struct request_queue *q;
1832	struct block_device *bdev;
1833	struct blk_trace *bt;
1834	ssize_t ret = -ENXIO;
1835
1836	bdev = bdget(part_devt(p));
1837	if (bdev == NULL)
1838		goto out;
1839
1840	q = blk_trace_get_queue(bdev);
1841	if (q == NULL)
1842		goto out_bdput;
1843
1844	mutex_lock(&q->debugfs_mutex);
1845
1846	bt = rcu_dereference_protected(q->blk_trace,
1847				       lockdep_is_held(&q->debugfs_mutex));
1848	if (attr == &dev_attr_enable) {
1849		ret = sprintf(buf, "%u\n", !!bt);
1850		goto out_unlock_bdev;
1851	}
1852
1853	if (bt == NULL)
1854		ret = sprintf(buf, "disabled\n");
1855	else if (attr == &dev_attr_act_mask)
1856		ret = blk_trace_mask2str(buf, bt->act_mask);
1857	else if (attr == &dev_attr_pid)
1858		ret = sprintf(buf, "%u\n", bt->pid);
1859	else if (attr == &dev_attr_start_lba)
1860		ret = sprintf(buf, "%llu\n", bt->start_lba);
1861	else if (attr == &dev_attr_end_lba)
1862		ret = sprintf(buf, "%llu\n", bt->end_lba);
1863
1864out_unlock_bdev:
1865	mutex_unlock(&q->debugfs_mutex);
1866out_bdput:
1867	bdput(bdev);
1868out:
1869	return ret;
1870}
1871
1872static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1873					  struct device_attribute *attr,
1874					  const char *buf, size_t count)
1875{
1876	struct block_device *bdev;
1877	struct request_queue *q;
1878	struct hd_struct *p;
1879	struct blk_trace *bt;
1880	u64 value;
1881	ssize_t ret = -EINVAL;
1882
1883	if (count == 0)
1884		goto out;
1885
1886	if (attr == &dev_attr_act_mask) {
1887		if (kstrtoull(buf, 0, &value)) {
1888			/* Assume it is a list of trace category names */
1889			ret = blk_trace_str2mask(buf);
1890			if (ret < 0)
1891				goto out;
1892			value = ret;
1893		}
1894	} else if (kstrtoull(buf, 0, &value))
1895		goto out;
1896
1897	ret = -ENXIO;
1898
1899	p = dev_to_part(dev);
1900	bdev = bdget(part_devt(p));
1901	if (bdev == NULL)
1902		goto out;
1903
1904	q = blk_trace_get_queue(bdev);
1905	if (q == NULL)
1906		goto out_bdput;
1907
1908	mutex_lock(&q->debugfs_mutex);
1909
1910	bt = rcu_dereference_protected(q->blk_trace,
1911				       lockdep_is_held(&q->debugfs_mutex));
1912	if (attr == &dev_attr_enable) {
1913		if (!!value == !!bt) {
1914			ret = 0;
1915			goto out_unlock_bdev;
1916		}
1917		if (value)
1918			ret = blk_trace_setup_queue(q, bdev);
1919		else
1920			ret = blk_trace_remove_queue(q);
1921		goto out_unlock_bdev;
1922	}
1923
1924	ret = 0;
1925	if (bt == NULL) {
1926		ret = blk_trace_setup_queue(q, bdev);
1927		bt = rcu_dereference_protected(q->blk_trace,
1928				lockdep_is_held(&q->debugfs_mutex));
1929	}
1930
1931	if (ret == 0) {
1932		if (attr == &dev_attr_act_mask)
1933			bt->act_mask = value;
1934		else if (attr == &dev_attr_pid)
1935			bt->pid = value;
1936		else if (attr == &dev_attr_start_lba)
1937			bt->start_lba = value;
1938		else if (attr == &dev_attr_end_lba)
1939			bt->end_lba = value;
1940	}
1941
1942out_unlock_bdev:
1943	mutex_unlock(&q->debugfs_mutex);
1944out_bdput:
1945	bdput(bdev);
1946out:
1947	return ret ? ret : count;
1948}
1949
1950int blk_trace_init_sysfs(struct device *dev)
1951{
1952	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1953}
1954
1955void blk_trace_remove_sysfs(struct device *dev)
1956{
1957	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1958}
1959
1960#endif /* CONFIG_BLK_DEV_IO_TRACE */
1961
1962#ifdef CONFIG_EVENT_TRACING
1963
1964void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
1965{
1966	int i = 0;
1967
1968	if (op & REQ_PREFLUSH)
1969		rwbs[i++] = 'F';
1970
1971	switch (op & REQ_OP_MASK) {
1972	case REQ_OP_WRITE:
1973	case REQ_OP_WRITE_SAME:
1974		rwbs[i++] = 'W';
1975		break;
1976	case REQ_OP_DISCARD:
1977		rwbs[i++] = 'D';
1978		break;
1979	case REQ_OP_SECURE_ERASE:
1980		rwbs[i++] = 'D';
1981		rwbs[i++] = 'E';
1982		break;
1983	case REQ_OP_FLUSH:
1984		rwbs[i++] = 'F';
1985		break;
1986	case REQ_OP_READ:
1987		rwbs[i++] = 'R';
1988		break;
1989	default:
1990		rwbs[i++] = 'N';
1991	}
1992
1993	if (op & REQ_FUA)
1994		rwbs[i++] = 'F';
1995	if (op & REQ_RAHEAD)
1996		rwbs[i++] = 'A';
1997	if (op & REQ_SYNC)
1998		rwbs[i++] = 'S';
1999	if (op & REQ_META)
2000		rwbs[i++] = 'M';
2001
2002	rwbs[i] = '\0';
2003}
2004EXPORT_SYMBOL_GPL(blk_fill_rwbs);
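/*
 * Illustrative usage sketch (exposition only, not part of the kernel
 * source). In this version the extra @bytes argument is accepted but not
 * consulted by the function body above; a read-ahead read decodes to "RA":
 *
 *	char rwbs[RWBS_LEN];
 *
 *	blk_fill_rwbs(rwbs, REQ_OP_READ | REQ_RAHEAD, 0);
 *
 * leaves rwbs containing "RA".
 */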
2005
2006#endif /* CONFIG_EVENT_TRACING */
2007