   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2021, Microsoft Corporation.
   4 *
   5 * Authors:
   6 *   Beau Belgrave <beaub@linux.microsoft.com>
   7 */
   8
   9#include <linux/bitmap.h>
  10#include <linux/cdev.h>
  11#include <linux/hashtable.h>
  12#include <linux/list.h>
  13#include <linux/io.h>
  14#include <linux/uio.h>
  15#include <linux/ioctl.h>
  16#include <linux/jhash.h>
  17#include <linux/refcount.h>
  18#include <linux/trace_events.h>
  19#include <linux/tracefs.h>
  20#include <linux/types.h>
  21#include <linux/uaccess.h>
  22#include <linux/highmem.h>
  23#include <linux/init.h>
  24#include <linux/user_events.h>
  25#include "trace_dynevent.h"
  26#include "trace_output.h"
  27#include "trace.h"
  28
  29#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
  30
  31#define FIELD_DEPTH_TYPE 0
  32#define FIELD_DEPTH_NAME 1
  33#define FIELD_DEPTH_SIZE 2
  34
  35/* Limit how long an event name plus args can be within the subsystem. */
  36#define MAX_EVENT_DESC 512
  37#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
  38#define MAX_FIELD_ARRAY_SIZE 1024
  39
  40/*
  41 * Internal bits (kernel side only) to keep track of connected probes:
  42 * These are used when status is requested in text form about an event. These
  43 * bits are compared against an internal byte on the event to determine which
  44 * probes to print out to the user.
  45 *
  46 * These do not reflect the mapped bytes between the user and kernel space.
  47 */
  48#define EVENT_STATUS_FTRACE BIT(0)
  49#define EVENT_STATUS_PERF BIT(1)
  50#define EVENT_STATUS_OTHER BIT(7)
  51
  52/*
  53 * Stores the system name, tables, and locks for a group of events. This
  54 * allows isolation for events by various means.
  55 */
  56struct user_event_group {
  57	char		*system_name;
  58	struct		hlist_node node;
  59	struct		mutex reg_mutex;
  60	DECLARE_HASHTABLE(register_table, 8);
  61};
  62
  63/* Group for init_user_ns mapping, top-most group */
  64static struct user_event_group *init_group;
  65
  66/* Max allowed events for the whole system */
  67static unsigned int max_user_events = 32768;
  68
  69/* Current number of events on the whole system */
  70static unsigned int current_user_events;
  71
  72/*
  73 * Stores per-event properties. As users register events
  74 * within a file, a user_event might be created if it does not
  75 * already exist. These are globally used and their lifetime
  76 * is tied to the refcnt member. These cannot go away until the
  77 * refcnt reaches one.
  78 */
  79struct user_event {
  80	struct user_event_group		*group;
  81	struct tracepoint		tracepoint;
  82	struct trace_event_call		call;
  83	struct trace_event_class	class;
  84	struct dyn_event		devent;
  85	struct hlist_node		node;
  86	struct list_head		fields;
  87	struct list_head		validators;
  88	struct work_struct		put_work;
  89	refcount_t			refcnt;
  90	int				min_size;
  91	int				reg_flags;
  92	char				status;
  93};
  94
  95/*
  96 * Stores per-mm/event properties that enable an address to be
  97 * updated properly for each task. As tasks are forked, we use
  98 * these to track enablement sites that are tied to an event.
  99 */
 100struct user_event_enabler {
 101	struct list_head	mm_enablers_link;
 102	struct user_event	*event;
 103	unsigned long		addr;
 104
 105	/* Track enable bit, flags, etc. Aligned for bitops. */
 106	unsigned long		values;
 107};
 108
 109/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
 110#define ENABLE_VAL_BIT_MASK 0x3F
 111
 112/* Bit 6 is for faulting status of enablement */
 113#define ENABLE_VAL_FAULTING_BIT 6
 114
 115/* Bit 7 is for freeing status of enablement */
 116#define ENABLE_VAL_FREEING_BIT 7
 117
 118/* Bit 8 is for marking 32-bit on 64-bit */
 119#define ENABLE_VAL_32_ON_64_BIT 8
 120
 121#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
 122
 123/* Only duplicate the bit and compat values */
 124#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
 125
 126#define ENABLE_BITOPS(e) (&(e)->values)
 127
 128#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
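/*
 * A minimal sketch (values here are hypothetical) of how 'values' packs
 * state: the low 6 bits hold the target bit index, while higher bits
 * carry kernel-internal flags such as the 32-on-64 compat marker.
 *
 *	struct user_event_enabler e = { .values = 5 };
 *
 *	// ENABLE_BIT(&e) == 5
 *	set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(&e));
 *
 *	// On fork only the bit and compat values carry over:
 *	// child->values = e.values & ENABLE_VAL_DUP_MASK;
 */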
 129
 130/* Used for asynchronous faulting in of pages */
 131struct user_event_enabler_fault {
 132	struct work_struct		work;
 133	struct user_event_mm		*mm;
 134	struct user_event_enabler	*enabler;
 135	int				attempt;
 136};
 137
 138static struct kmem_cache *fault_cache;
 139
 140/* Global list of memory descriptors using user_events */
 141static LIST_HEAD(user_event_mms);
 142static DEFINE_SPINLOCK(user_event_mms_lock);
 143
 144/*
 145 * Stores per-file event references. As users register events
 146 * within a file, this structure is modified and freed via RCU.
 147 * The lifetime of this struct is tied to the lifetime of the file.
 148 * These are not shared and only accessible by the file that created it.
 149 */
 150struct user_event_refs {
 151	struct rcu_head		rcu;
 152	int			count;
 153	struct user_event	*events[];
 154};
 155
 156struct user_event_file_info {
 157	struct user_event_group	*group;
 158	struct user_event_refs	*refs;
 159};
 160
 161#define VALIDATOR_ENSURE_NULL (1 << 0)
 162#define VALIDATOR_REL (1 << 1)
 163
 164struct user_event_validator {
 165	struct list_head	user_event_link;
 166	int			offset;
 167	int			flags;
 168};
 169
 170static inline void align_addr_bit(unsigned long *addr, int *bit,
 171				  unsigned long *flags)
 172{
 173	if (IS_ALIGNED(*addr, sizeof(long))) {
 174#ifdef __BIG_ENDIAN
 175		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
 176		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
 177			*bit += 32;
 178#endif
 179		return;
 180	}
 181
 182	*addr = ALIGN_DOWN(*addr, sizeof(long));
 183
 184	/*
 185	 * We only support 32 and 64 bit values. The only time we need
 186	 * to align is a 32 bit value on a 64 bit kernel, which on LE
 187	 * is always 32 bits, and on BE requires no change when unaligned.
 188	 */
 189#ifdef __LITTLE_ENDIAN
 190	*bit += 32;
 191#endif
 192}
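/*
 * Worked example (little-endian 64-bit kernel, hypothetical addresses):
 * a 32-bit enable word at uaddr 0x1004 with bit 1 is not long aligned,
 * so align_addr_bit() rewrites it to the containing long at 0x1000 and
 * shifts the bit to 1 + 32 = 33. Bit 33 of the long at 0x1000 is
 * exactly bit 1 of the u32 at 0x1004 on LE.
 */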
 193
 194typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
 195				   void *tpdata, bool *faulted);
 196
 197static int user_event_parse(struct user_event_group *group, char *name,
 198			    char *args, char *flags,
 199			    struct user_event **newuser, int reg_flags);
 200
 201static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
 202static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
 203static void user_event_mm_put(struct user_event_mm *mm);
 204static int destroy_user_event(struct user_event *user);
 205
 206static u32 user_event_key(char *name)
 207{
 208	return jhash(name, strlen(name), 0);
 209}
 210
 211static bool user_event_capable(u16 reg_flags)
 212{
 213	/* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
 214	if (reg_flags & USER_EVENT_REG_PERSIST) {
 215		if (!perfmon_capable())
 216			return false;
 217	}
 218
 219	return true;
 220}
 221
 222static struct user_event *user_event_get(struct user_event *user)
 223{
 224	refcount_inc(&user->refcnt);
 225
 226	return user;
 227}
 228
 229static void delayed_destroy_user_event(struct work_struct *work)
 230{
 231	struct user_event *user = container_of(
 232		work, struct user_event, put_work);
 233
 234	mutex_lock(&event_mutex);
 235
 236	if (!refcount_dec_and_test(&user->refcnt))
 237		goto out;
 238
 239	if (destroy_user_event(user)) {
 240		/*
 241		 * The only reason this would fail here is if we cannot
 242		 * update the visibility of the event. In this case the
 243		 * event stays in the hashtable, waiting for someone to
 244		 * attempt to delete it later.
 245		 */
 246		pr_warn("user_events: Unable to delete event\n");
 247		refcount_set(&user->refcnt, 1);
 248	}
 249out:
 250	mutex_unlock(&event_mutex);
 251}
 252
 253static void user_event_put(struct user_event *user, bool locked)
 254{
 255	bool delete;
 256
 257	if (unlikely(!user))
 258		return;
 259
 260	/*
 261	 * When the event is not enabled for auto-delete there will always
 262	 * be at least 1 reference to the event. During the event creation
 263	 * we initially set the refcnt to 2 to achieve this. In those cases
 264	 * the caller must acquire event_mutex and after decrement check if
 265	 * the refcnt is 1, meaning this is the last reference. When auto
 266	 * delete is enabled, there will only be 1 ref, i.e. refcnt will
 267	 * only be set to 1 during creation to allow the below checks to go
 268	 * through upon the last put. The last put must always be done with
 269	 * the event mutex held.
 270	 */
 271	if (!locked) {
 272		lockdep_assert_not_held(&event_mutex);
 273		delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
 274	} else {
 275		lockdep_assert_held(&event_mutex);
 276		delete = refcount_dec_and_test(&user->refcnt);
 277	}
 278
 279	if (!delete)
 280		return;
 281
 282	/*
 283	 * We now have the event_mutex in all cases, which ensures that
 284	 * no new references will be taken until event_mutex is released.
 285	 * New references come through find_user_event(), which requires
 286	 * the event_mutex to be held.
 287	 */
 288
 289	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
 290		/* We should not get here when persist flag is set */
 291		pr_alert("BUG: Auto-delete engaged on persistent event\n");
 292		goto out;
 293	}
 294
 295	/*
 296	 * Unfortunately we have to attempt the actual destroy in a work
 297	 * queue. This is because not all cases handle a trace_event_call
 298	 * being removed within the class->reg() operation for unregister.
 299	 */
 300	INIT_WORK(&user->put_work, delayed_destroy_user_event);
 301
 302	/*
 303	 * Since the event is still in the hashtable, we have to re-inc
 304	 * the ref count to 1. This count will be decremented and checked
 305	 * in the work queue to ensure it's still the last ref. This is
 306	 * needed because a user-process could register the same event in
 307	 * between the time of event_mutex release and the work queue
 308	 * running the delayed destroy. If we removed the item now from
 309	 * the hashtable, this would result in a timing window where a
 310	 * user process would fail a register because the trace_event_call
 311	 * register would fail in the tracing layers.
 312	 */
 313	refcount_set(&user->refcnt, 1);
 314
 315	if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
 316		/*
 317		 * If we fail we must wait for an admin to attempt delete or
 318		 * another register/close of the event, whichever is first.
 319		 */
 320		pr_warn("user_events: Unable to queue delayed destroy\n");
 321	}
 322out:
 323	/* If we didn't hold event_mutex on entry, unlock it now */
 324	if (!locked)
 325		mutex_unlock(&event_mutex);
 326}
 327
 328static void user_event_group_destroy(struct user_event_group *group)
 329{
 330	kfree(group->system_name);
 331	kfree(group);
 332}
 333
 334static char *user_event_group_system_name(void)
 335{
 336	char *system_name;
 337	int len = sizeof(USER_EVENTS_SYSTEM) + 1;
 338
 339	system_name = kmalloc(len, GFP_KERNEL);
 340
 341	if (!system_name)
 342		return NULL;
 343
 344	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
 345
 346	return system_name;
 347}
 348
 349static struct user_event_group *current_user_event_group(void)
 350{
 351	return init_group;
 352}
 353
 354static struct user_event_group *user_event_group_create(void)
 355{
 356	struct user_event_group *group;
 357
 358	group = kzalloc(sizeof(*group), GFP_KERNEL);
 359
 360	if (!group)
 361		return NULL;
 362
 363	group->system_name = user_event_group_system_name();
 364
 365	if (!group->system_name)
 366		goto error;
 367
 368	mutex_init(&group->reg_mutex);
 369	hash_init(group->register_table);
 370
 371	return group;
 372error:
 373	if (group)
 374		user_event_group_destroy(group);
 375
 376	return NULL;
 377};
 378
 379static void user_event_enabler_destroy(struct user_event_enabler *enabler,
 380				       bool locked)
 381{
 382	list_del_rcu(&enabler->mm_enablers_link);
 383
 384	/* No longer tracking the event via the enabler */
 385	user_event_put(enabler->event, locked);
 386
 387	kfree(enabler);
 388}
 389
 390static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
 391				  int attempt)
 392{
 393	bool unlocked;
 394	int ret;
 395
 396	/*
 397	 * Normally this is low; ensure that it cannot be taken advantage of
 398	 * by bad user processes to cause excessive looping.
 399	 */
 400	if (attempt > 10)
 401		return -EFAULT;
 402
 403	mmap_read_lock(mm->mm);
 404
 405	/* Ensure MM has tasks, cannot use after exit_mm() */
 406	if (refcount_read(&mm->tasks) == 0) {
 407		ret = -ENOENT;
 408		goto out;
 409	}
 410
 411	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
 412			       &unlocked);
 413out:
 414	mmap_read_unlock(mm->mm);
 415
 416	return ret;
 417}
 418
 419static int user_event_enabler_write(struct user_event_mm *mm,
 420				    struct user_event_enabler *enabler,
 421				    bool fixup_fault, int *attempt);
 422
 423static void user_event_enabler_fault_fixup(struct work_struct *work)
 424{
 425	struct user_event_enabler_fault *fault = container_of(
 426		work, struct user_event_enabler_fault, work);
 427	struct user_event_enabler *enabler = fault->enabler;
 428	struct user_event_mm *mm = fault->mm;
 429	unsigned long uaddr = enabler->addr;
 430	int attempt = fault->attempt;
 431	int ret;
 432
 433	ret = user_event_mm_fault_in(mm, uaddr, attempt);
 434
 435	if (ret && ret != -ENOENT) {
 436		struct user_event *user = enabler->event;
 437
 438		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
 439			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
 440	}
 441
 442	/* Prevent state changes from racing */
 443	mutex_lock(&event_mutex);
 444
 445	/* User asked for enabler to be removed during fault */
 446	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
 447		user_event_enabler_destroy(enabler, true);
 448		goto out;
 449	}
 450
 451	/*
 452	 * If we managed to get the page, re-issue the write. We do not
 453	 * want to get into a possible infinite loop, which is why we only
 454	 * attempt again directly if the page came in. If we couldn't get
 455	 * the page here, then we will try again the next time the event is
 456	 * enabled/disabled.
 457	 */
 458	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
 459
 460	if (!ret) {
 461		mmap_read_lock(mm->mm);
 462		user_event_enabler_write(mm, enabler, true, &attempt);
 463		mmap_read_unlock(mm->mm);
 464	}
 465out:
 466	mutex_unlock(&event_mutex);
 467
 468	/* In all cases we no longer need the mm or fault */
 469	user_event_mm_put(mm);
 470	kmem_cache_free(fault_cache, fault);
 471}
 472
 473static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
 474					   struct user_event_enabler *enabler,
 475					   int attempt)
 476{
 477	struct user_event_enabler_fault *fault;
 478
 479	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
 480
 481	if (!fault)
 482		return false;
 483
 484	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
 485	fault->mm = user_event_mm_get(mm);
 486	fault->enabler = enabler;
 487	fault->attempt = attempt;
 488
 489	/* Don't try to queue in again while we have a pending fault */
 490	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
 491
 492	if (!schedule_work(&fault->work)) {
 493		/* Allow another attempt later */
 494		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
 495
 496		user_event_mm_put(mm);
 497		kmem_cache_free(fault_cache, fault);
 498
 499		return false;
 500	}
 501
 502	return true;
 503}
 504
 505static int user_event_enabler_write(struct user_event_mm *mm,
 506				    struct user_event_enabler *enabler,
 507				    bool fixup_fault, int *attempt)
 508{
 509	unsigned long uaddr = enabler->addr;
 510	unsigned long *ptr;
 511	struct page *page;
 512	void *kaddr;
 513	int bit = ENABLE_BIT(enabler);
 514	int ret;
 515
 516	lockdep_assert_held(&event_mutex);
 517	mmap_assert_locked(mm->mm);
 518
 519	*attempt += 1;
 520
 521	/* Ensure MM has tasks, cannot use after exit_mm() */
 522	if (refcount_read(&mm->tasks) == 0)
 523		return -ENOENT;
 524
 525	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
 526		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
 527		return -EBUSY;
 528
 529	align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
 530
 531	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
 532				    &page, NULL);
 533
 534	if (unlikely(ret <= 0)) {
 535		if (!fixup_fault)
 536			return -EFAULT;
 537
 538		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
 539			pr_warn("user_events: Unable to queue fault handler\n");
 540
 541		return -EFAULT;
 542	}
 543
 544	kaddr = kmap_local_page(page);
 545	ptr = kaddr + (uaddr & ~PAGE_MASK);
 546
 547	/* Update bit atomically, user tracers must be atomic as well */
 548	if (enabler->event && enabler->event->status)
 549		set_bit(bit, ptr);
 550	else
 551		clear_bit(bit, ptr);
 552
 553	kunmap_local(kaddr);
 554	unpin_user_pages_dirty_lock(&page, 1, true);
 555
 556	return 0;
 557}
 558
 559static bool user_event_enabler_exists(struct user_event_mm *mm,
 560				      unsigned long uaddr, unsigned char bit)
 561{
 562	struct user_event_enabler *enabler;
 563
 564	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
 565		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
 566			return true;
 567	}
 568
 569	return false;
 570}
 571
 572static void user_event_enabler_update(struct user_event *user)
 573{
 574	struct user_event_enabler *enabler;
 575	struct user_event_mm *next;
 576	struct user_event_mm *mm;
 577	int attempt;
 578
 579	lockdep_assert_held(&event_mutex);
 580
 581	/*
 582	 * We need to build a one-shot list of all the mms that have an
 583	 * enabler for the user_event passed in. This list is only valid
 584	 * while holding the event_mutex. The only reason for this is due
 585	 * to the global mm list being RCU protected and we use methods
 586	 * which can wait (mmap_read_lock and pin_user_pages_remote).
 587	 *
 588	 * NOTE: user_event_mm_get_all() increments the ref count of each
 589	 * mm that is added to the list to prevent removal timing windows.
 590	 * We must always put each mm after they are used, which may wait.
 591	 */
 592	mm = user_event_mm_get_all(user);
 593
 594	while (mm) {
 595		next = mm->next;
 596		mmap_read_lock(mm->mm);
 597
 598		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
 599			if (enabler->event == user) {
 600				attempt = 0;
 601				user_event_enabler_write(mm, enabler, true, &attempt);
 602			}
 603		}
 604
 605		mmap_read_unlock(mm->mm);
 606		user_event_mm_put(mm);
 607		mm = next;
 608	}
 609}
 610
 611static bool user_event_enabler_dup(struct user_event_enabler *orig,
 612				   struct user_event_mm *mm)
 613{
 614	struct user_event_enabler *enabler;
 615
 616	/* Skip pending frees */
 617	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
 618		return true;
 619
 620	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
 621
 622	if (!enabler)
 623		return false;
 624
 625	enabler->event = user_event_get(orig->event);
 626	enabler->addr = orig->addr;
 627
 628	/* Only dup part of value (ignore future flags, etc) */
 629	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
 630
 631	/* Enablers not exposed yet, RCU not required */
 632	list_add(&enabler->mm_enablers_link, &mm->enablers);
 633
 634	return true;
 635}
 636
 637static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
 638{
 639	refcount_inc(&mm->refcnt);
 640
 641	return mm;
 642}
 643
 644static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
 645{
 646	struct user_event_mm *found = NULL;
 647	struct user_event_enabler *enabler;
 648	struct user_event_mm *mm;
 649
 650	/*
 651	 * We use the mm->next field to build a one-shot list from the global
 652	 * RCU protected list. To build this list the event_mutex must be held.
 653	 * This lets us build a list without requiring allocs that could fail
 654	 * when user based events are most wanted for diagnostics.
 655	 */
 656	lockdep_assert_held(&event_mutex);
 657
 658	/*
 659	 * We do not want to block fork/exec while enablements are being
 660	 * updated, so we use RCU to walk the current tasks that have used
 661	 * user_events ABI for 1 or more events. Each enabler found in each
 662	 * task that matches the event being updated has a write to reflect
 663	 * the kernel state back into the process. Waits/faults must not occur
 664	 * during this. So we scan the list under RCU for all the mm that have
 665	 * the event within it. This is needed because mmap_read_lock() can wait.
 666	 * Each user mm returned has a ref inc to handle remove RCU races.
 667	 */
 668	rcu_read_lock();
 669
 670	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
 671		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
 672			if (enabler->event == user) {
 673				mm->next = found;
 674				found = user_event_mm_get(mm);
 675				break;
 676			}
 677		}
 678	}
 679
 680	rcu_read_unlock();
 681
 682	return found;
 683}
 684
 685static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
 686{
 687	struct user_event_mm *user_mm;
 688
 689	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
 690
 691	if (!user_mm)
 692		return NULL;
 693
 694	user_mm->mm = t->mm;
 695	INIT_LIST_HEAD(&user_mm->enablers);
 696	refcount_set(&user_mm->refcnt, 1);
 697	refcount_set(&user_mm->tasks, 1);
 698
 699	/*
 700	 * The lifetime of the memory descriptor can slightly outlast
 701	 * the task lifetime if a ref to the user_event_mm is taken
 702	 * between list_del_rcu() and call_rcu(). Therefore we need
 703	 * to take a reference to it to ensure it can live this long
 704	 * under this corner case. This can also occur in clones that
 705	 * outlast the parent.
 706	 */
 707	mmgrab(user_mm->mm);
 708
 709	return user_mm;
 710}
 711
 712static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
 713{
 714	unsigned long flags;
 715
 716	spin_lock_irqsave(&user_event_mms_lock, flags);
 717	list_add_rcu(&user_mm->mms_link, &user_event_mms);
 718	spin_unlock_irqrestore(&user_event_mms_lock, flags);
 719
 720	t->user_event_mm = user_mm;
 721}
 722
 723static struct user_event_mm *current_user_event_mm(void)
 724{
 725	struct user_event_mm *user_mm = current->user_event_mm;
 726
 727	if (user_mm)
 728		goto inc;
 729
 730	user_mm = user_event_mm_alloc(current);
 731
 732	if (!user_mm)
 733		goto error;
 734
 735	user_event_mm_attach(user_mm, current);
 736inc:
 737	refcount_inc(&user_mm->refcnt);
 738error:
 739	return user_mm;
 740}
 741
 742static void user_event_mm_destroy(struct user_event_mm *mm)
 743{
 744	struct user_event_enabler *enabler, *next;
 745
 746	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
 747		user_event_enabler_destroy(enabler, false);
 748
 749	mmdrop(mm->mm);
 750	kfree(mm);
 751}
 752
 753static void user_event_mm_put(struct user_event_mm *mm)
 754{
 755	if (mm && refcount_dec_and_test(&mm->refcnt))
 756		user_event_mm_destroy(mm);
 757}
 758
 759static void delayed_user_event_mm_put(struct work_struct *work)
 760{
 761	struct user_event_mm *mm;
 762
 763	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
 764	user_event_mm_put(mm);
 765}
 766
 767void user_event_mm_remove(struct task_struct *t)
 768{
 769	struct user_event_mm *mm;
 770	unsigned long flags;
 771
 772	might_sleep();
 773
 774	mm = t->user_event_mm;
 775	t->user_event_mm = NULL;
 776
 777	/* Clone will increment the tasks, only remove if last clone */
 778	if (!refcount_dec_and_test(&mm->tasks))
 779		return;
 780
 781	/* Remove the mm from the list, so it can no longer be enabled */
 782	spin_lock_irqsave(&user_event_mms_lock, flags);
 783	list_del_rcu(&mm->mms_link);
 784	spin_unlock_irqrestore(&user_event_mms_lock, flags);
 785
 786	/*
 787	 * We need to wait for currently occurring writes to stop within
 788	 * the mm. This is required since exit_mm() snaps the current rss
 789	 * stats and clears them. On the final mmdrop(), check_mm() will
 790	 * report a bug if these increment.
 791	 *
 792	 * All writes/pins are done under mmap_read lock, take the write
 793	 * lock to ensure in-progress faults have completed. Faults that
 794	 * are pending but yet to run will check the task count and skip
 795	 * the fault since the mm is going away.
 796	 */
 797	mmap_write_lock(mm->mm);
 798	mmap_write_unlock(mm->mm);
 799
 800	/*
 801	 * Put for mm must be done after RCU delay to handle new refs in
 802	 * between the list_del_rcu() and now. This ensures any get refs
 803	 * during rcu_read_lock() are accounted for during list removal.
 804	 *
 805	 * CPU A			|	CPU B
 806	 * ---------------------------------------------------------------
 807	 * user_event_mm_remove()	|	rcu_read_lock();
 808	 * list_del_rcu()		|	list_for_each_entry_rcu();
 809	 * call_rcu()			|	refcount_inc();
 810	 * .				|	rcu_read_unlock();
 811	 * schedule_work()		|	.
 812	 * user_event_mm_put()		|	.
 813	 *
 814	 * mmdrop() cannot be called in the softirq context of call_rcu()
 815	 * so we use a work queue after call_rcu() to run within.
 816	 */
 817	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
 818	queue_rcu_work(system_wq, &mm->put_rwork);
 819}
 820
 821void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
 822{
 823	struct user_event_mm *mm = user_event_mm_alloc(t);
 824	struct user_event_enabler *enabler;
 825
 826	if (!mm)
 827		return;
 828
 829	rcu_read_lock();
 830
 831	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
 832		if (!user_event_enabler_dup(enabler, mm))
 833			goto error;
 834	}
 835
 836	rcu_read_unlock();
 837
 838	user_event_mm_attach(mm, t);
 839	return;
 840error:
 841	rcu_read_unlock();
 842	user_event_mm_destroy(mm);
 843}
 844
 845static bool current_user_event_enabler_exists(unsigned long uaddr,
 846					      unsigned char bit)
 847{
 848	struct user_event_mm *user_mm = current_user_event_mm();
 849	bool exists;
 850
 851	if (!user_mm)
 852		return false;
 853
 854	exists = user_event_enabler_exists(user_mm, uaddr, bit);
 855
 856	user_event_mm_put(user_mm);
 857
 858	return exists;
 859}
 860
 861static struct user_event_enabler
 862*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
 863			   int *write_result)
 864{
 865	struct user_event_enabler *enabler;
 866	struct user_event_mm *user_mm;
 867	unsigned long uaddr = (unsigned long)reg->enable_addr;
 868	int attempt = 0;
 869
 870	user_mm = current_user_event_mm();
 871
 872	if (!user_mm)
 873		return NULL;
 874
 875	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
 876
 877	if (!enabler)
 878		goto out;
 879
 880	enabler->event = user;
 881	enabler->addr = uaddr;
 882	enabler->values = reg->enable_bit;
 883
 884#if BITS_PER_LONG >= 64
 885	if (reg->enable_size == 4)
 886		set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
 887#endif
 888
 889retry:
 890	/* Prevents state changes from racing with new enablers */
 891	mutex_lock(&event_mutex);
 892
 893	/* Attempt to reflect the current state within the process */
 894	mmap_read_lock(user_mm->mm);
 895	*write_result = user_event_enabler_write(user_mm, enabler, false,
 896						 &attempt);
 897	mmap_read_unlock(user_mm->mm);
 898
 899	/*
 900	 * If the write works, then we will track the enabler. A ref to the
 901	 * underlying user_event is held by the enabler to prevent it going
 902	 * away while the enabler is still in use by a process. The ref is
 903	 * removed when the enabler is destroyed. This means an event cannot
 904	 * be forcefully deleted from the system until all tasks using it
 905	 * exit or run exec(), which includes forks and clones.
 906	 */
 907	if (!*write_result) {
 908		user_event_get(user);
 909		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
 910	}
 911
 912	mutex_unlock(&event_mutex);
 913
 914	if (*write_result) {
 915		/* Attempt to fault-in and retry if it worked */
 916		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
 917			goto retry;
 918
 919		kfree(enabler);
 920		enabler = NULL;
 921	}
 922out:
 923	user_event_mm_put(user_mm);
 924
 925	return enabler;
 926}
 927
 928static __always_inline __must_check
 929bool user_event_last_ref(struct user_event *user)
 930{
 931	int last = 0;
 932
 933	if (user->reg_flags & USER_EVENT_REG_PERSIST)
 934		last = 1;
 935
 936	return refcount_read(&user->refcnt) == last;
 937}
 938
 939static __always_inline __must_check
 940size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
 941{
 942	size_t ret;
 943
 944	pagefault_disable();
 945
 946	ret = copy_from_iter_nocache(addr, bytes, i);
 947
 948	pagefault_enable();
 949
 950	return ret;
 951}
 952
 953static struct list_head *user_event_get_fields(struct trace_event_call *call)
 954{
 955	struct user_event *user = (struct user_event *)call->data;
 956
 957	return &user->fields;
 958}
 959
 960/*
 961 * Parses a register command for user_events
 962 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 963 *
 964 * Example: an event named 'test' with a 20 char 'msg' field followed by an
 965 * unsigned int 'id' field:
 966 * test char[20] msg;unsigned int id
 967 *
 968 * NOTE: Offsets are from the user data perspective, they are not from the
 969 * trace_entry/buffer perspective. We automatically add the common properties
 970 * sizes to the offset for the user.
 971 *
 972 * Upon success user_event has its ref count increased by 1.
 973 */
 974static int user_event_parse_cmd(struct user_event_group *group,
 975				char *raw_command, struct user_event **newuser,
 976				int reg_flags)
 977{
 978	char *name = raw_command;
 979	char *args = strpbrk(name, " ");
 980	char *flags;
 981
 982	if (args)
 983		*args++ = '\0';
 984
 985	flags = strpbrk(name, ":");
 986
 987	if (flags)
 988		*flags++ = '\0';
 989
 990	return user_event_parse(group, name, args, flags, newuser, reg_flags);
 991}
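/*
 * A minimal userspace registration sketch for the command format above.
 * It assumes the uapi layout from <linux/user_events.h> and a tracefs
 * mount at /sys/kernel/tracing; both are assumptions and may differ on
 * a given system:
 *
 *	struct user_reg reg = { 0 };
 *	__u32 enabled = 0;
 *	int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;
 *	reg.enable_size = sizeof(enabled);
 *	reg.enable_addr = (__u64)&enabled;
 *	reg.name_args = (__u64)"test char[20] msg;unsigned int id";
 *
 *	if (ioctl(fd, DIAG_IOCSREG, &reg) == -1)
 *		err(1, "DIAG_IOCSREG");
 *	// reg.write_index now selects this event on each write()
 */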
 992
 993static int user_field_array_size(const char *type)
 994{
 995	const char *start = strchr(type, '[');
 996	char val[8];
 997	char *bracket;
 998	int size = 0;
 999
1000	if (start == NULL)
1001		return -EINVAL;
1002
1003	if (strscpy(val, start + 1, sizeof(val)) <= 0)
1004		return -EINVAL;
1005
1006	bracket = strchr(val, ']');
1007
1008	if (!bracket)
1009		return -EINVAL;
1010
1011	*bracket = '\0';
1012
1013	if (kstrtouint(val, 0, &size))
1014		return -EINVAL;
1015
1016	if (size > MAX_FIELD_ARRAY_SIZE)
1017		return -EINVAL;
1018
1019	return size;
1020}
1021
1022static int user_field_size(const char *type)
1023{
1024	/* long is not allowed from a user, since it's ambiguous in size */
1025	if (strcmp(type, "s64") == 0)
1026		return sizeof(s64);
1027	if (strcmp(type, "u64") == 0)
1028		return sizeof(u64);
1029	if (strcmp(type, "s32") == 0)
1030		return sizeof(s32);
1031	if (strcmp(type, "u32") == 0)
1032		return sizeof(u32);
1033	if (strcmp(type, "int") == 0)
1034		return sizeof(int);
1035	if (strcmp(type, "unsigned int") == 0)
1036		return sizeof(unsigned int);
1037	if (strcmp(type, "s16") == 0)
1038		return sizeof(s16);
1039	if (strcmp(type, "u16") == 0)
1040		return sizeof(u16);
1041	if (strcmp(type, "short") == 0)
1042		return sizeof(short);
1043	if (strcmp(type, "unsigned short") == 0)
1044		return sizeof(unsigned short);
1045	if (strcmp(type, "s8") == 0)
1046		return sizeof(s8);
1047	if (strcmp(type, "u8") == 0)
1048		return sizeof(u8);
1049	if (strcmp(type, "char") == 0)
1050		return sizeof(char);
1051	if (strcmp(type, "unsigned char") == 0)
1052		return sizeof(unsigned char);
1053	if (str_has_prefix(type, "char["))
1054		return user_field_array_size(type);
1055	if (str_has_prefix(type, "unsigned char["))
1056		return user_field_array_size(type);
1057	if (str_has_prefix(type, "__data_loc "))
1058		return sizeof(u32);
1059	if (str_has_prefix(type, "__rel_loc "))
1060		return sizeof(u32);
1061
1062	/* Unknown basic type, error */
1063	return -EINVAL;
1064}
1065
1066static void user_event_destroy_validators(struct user_event *user)
1067{
1068	struct user_event_validator *validator, *next;
1069	struct list_head *head = &user->validators;
1070
1071	list_for_each_entry_safe(validator, next, head, user_event_link) {
1072		list_del(&validator->user_event_link);
1073		kfree(validator);
1074	}
1075}
1076
1077static void user_event_destroy_fields(struct user_event *user)
1078{
1079	struct ftrace_event_field *field, *next;
1080	struct list_head *head = &user->fields;
1081
1082	list_for_each_entry_safe(field, next, head, link) {
1083		list_del(&field->link);
1084		kfree(field);
1085	}
1086}
1087
1088static int user_event_add_field(struct user_event *user, const char *type,
1089				const char *name, int offset, int size,
1090				int is_signed, int filter_type)
1091{
1092	struct user_event_validator *validator;
1093	struct ftrace_event_field *field;
1094	int validator_flags = 0;
1095
1096	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
1097
1098	if (!field)
1099		return -ENOMEM;
1100
1101	if (str_has_prefix(type, "__data_loc "))
1102		goto add_validator;
1103
1104	if (str_has_prefix(type, "__rel_loc ")) {
1105		validator_flags |= VALIDATOR_REL;
1106		goto add_validator;
1107	}
1108
1109	goto add_field;
1110
1111add_validator:
1112	if (strstr(type, "char") != NULL)
1113		validator_flags |= VALIDATOR_ENSURE_NULL;
1114
1115	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
1116
1117	if (!validator) {
1118		kfree(field);
1119		return -ENOMEM;
1120	}
1121
1122	validator->flags = validator_flags;
1123	validator->offset = offset;
1124
1125	/* Want sequential access when validating */
1126	list_add_tail(&validator->user_event_link, &user->validators);
1127
1128add_field:
1129	field->type = type;
1130	field->name = name;
1131	field->offset = offset;
1132	field->size = size;
1133	field->is_signed = is_signed;
1134	field->filter_type = filter_type;
1135
1136	if (filter_type == FILTER_OTHER)
1137		field->filter_type = filter_assign_type(type);
1138
1139	list_add(&field->link, &user->fields);
1140
1141	/*
1142	 * Min size required from user writes; this does not include
1143	 * the size of trace_entry (common fields).
1144	 */
1145	user->min_size = (offset + size) - sizeof(struct trace_entry);
1146
1147	return 0;
1148}
1149
1150/*
1151 * Parses the values of a field within the description
1152 * Format: type name [size]
1153 */
1154static int user_event_parse_field(char *field, struct user_event *user,
1155				  u32 *offset)
1156{
1157	char *part, *type, *name;
1158	u32 depth = 0, saved_offset = *offset;
1159	int len, size = -EINVAL;
1160	bool is_struct = false;
1161
1162	field = skip_spaces(field);
1163
1164	if (*field == '\0')
1165		return 0;
1166
1167	/* Handle types that have a space within */
1168	len = str_has_prefix(field, "unsigned ");
1169	if (len)
1170		goto skip_next;
1171
1172	len = str_has_prefix(field, "struct ");
1173	if (len) {
1174		is_struct = true;
1175		goto skip_next;
1176	}
1177
1178	len = str_has_prefix(field, "__data_loc unsigned ");
1179	if (len)
1180		goto skip_next;
1181
1182	len = str_has_prefix(field, "__data_loc ");
1183	if (len)
1184		goto skip_next;
1185
1186	len = str_has_prefix(field, "__rel_loc unsigned ");
1187	if (len)
1188		goto skip_next;
1189
1190	len = str_has_prefix(field, "__rel_loc ");
1191	if (len)
1192		goto skip_next;
1193
1194	goto parse;
1195skip_next:
1196	type = field;
1197	field = strpbrk(field + len, " ");
1198
1199	if (field == NULL)
1200		return -EINVAL;
1201
1202	*field++ = '\0';
1203	depth++;
1204parse:
1205	name = NULL;
1206
1207	while ((part = strsep(&field, " ")) != NULL) {
1208		switch (depth++) {
1209		case FIELD_DEPTH_TYPE:
1210			type = part;
1211			break;
1212		case FIELD_DEPTH_NAME:
1213			name = part;
1214			break;
1215		case FIELD_DEPTH_SIZE:
1216			if (!is_struct)
1217				return -EINVAL;
1218
1219			if (kstrtou32(part, 10, &size))
1220				return -EINVAL;
1221			break;
1222		default:
1223			return -EINVAL;
1224		}
1225	}
1226
1227	if (depth < FIELD_DEPTH_SIZE || !name)
1228		return -EINVAL;
1229
1230	if (depth == FIELD_DEPTH_SIZE)
1231		size = user_field_size(type);
1232
1233	if (size == 0)
1234		return -EINVAL;
1235
1236	if (size < 0)
1237		return size;
1238
1239	*offset = saved_offset + size;
1240
1241	return user_event_add_field(user, type, name, saved_offset, size,
1242				    type[0] != 'u', FILTER_OTHER);
1243}
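/*
 * For example, "u32 count" parses as type "u32" and name "count" with
 * the size implied by user_field_size(), while a struct field such as
 * "struct my_data payload 32" (hypothetical names) must carry the
 * explicit trailing byte size, since the kernel cannot know the
 * userspace struct layout.
 */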
1244
1245static int user_event_parse_fields(struct user_event *user, char *args)
1246{
1247	char *field;
1248	u32 offset = sizeof(struct trace_entry);
1249	int ret = -EINVAL;
1250
1251	if (args == NULL)
1252		return 0;
1253
1254	while ((field = strsep(&args, ";")) != NULL) {
1255		ret = user_event_parse_field(field, user, &offset);
1256
1257		if (ret)
1258			break;
1259	}
1260
1261	return ret;
1262}
1263
1264static struct trace_event_fields user_event_fields_array[1];
1265
1266static const char *user_field_format(const char *type)
1267{
1268	if (strcmp(type, "s64") == 0)
1269		return "%lld";
1270	if (strcmp(type, "u64") == 0)
1271		return "%llu";
1272	if (strcmp(type, "s32") == 0)
1273		return "%d";
1274	if (strcmp(type, "u32") == 0)
1275		return "%u";
1276	if (strcmp(type, "int") == 0)
1277		return "%d";
1278	if (strcmp(type, "unsigned int") == 0)
1279		return "%u";
1280	if (strcmp(type, "s16") == 0)
1281		return "%d";
1282	if (strcmp(type, "u16") == 0)
1283		return "%u";
1284	if (strcmp(type, "short") == 0)
1285		return "%d";
1286	if (strcmp(type, "unsigned short") == 0)
1287		return "%u";
1288	if (strcmp(type, "s8") == 0)
1289		return "%d";
1290	if (strcmp(type, "u8") == 0)
1291		return "%u";
1292	if (strcmp(type, "char") == 0)
1293		return "%d";
1294	if (strcmp(type, "unsigned char") == 0)
1295		return "%u";
1296	if (strstr(type, "char[") != NULL)
1297		return "%s";
1298
1299	/* Unknown, likely a struct; allowed, treat as 64-bit */
1300	return "%llu";
1301}
1302
1303static bool user_field_is_dyn_string(const char *type, const char **str_func)
1304{
1305	if (str_has_prefix(type, "__data_loc ")) {
1306		*str_func = "__get_str";
1307		goto check;
1308	}
1309
1310	if (str_has_prefix(type, "__rel_loc ")) {
1311		*str_func = "__get_rel_str";
1312		goto check;
1313	}
1314
1315	return false;
1316check:
1317	return strstr(type, "char") != NULL;
1318}
1319
1320#define LEN_OR_ZERO (len ? len - pos : 0)
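/*
 * The helpers below follow the common two-pass snprintf pattern: call
 * once with len == 0 to learn the required size (the returned pos + 1
 * includes the NUL terminator), allocate, then call again with the
 * real buffer, as user_field_match() does further down.
 */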
1321static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
1322				     char *buf, int len, bool *colon)
1323{
1324	int pos = 0, i = *iout;
1325
1326	*colon = false;
1327
1328	for (; i < argc; ++i) {
1329		if (i != *iout)
1330			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1331
1332		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
1333
1334		if (strchr(argv[i], ';')) {
1335			++i;
1336			*colon = true;
1337			break;
1338		}
1339	}
1340
1341	/* Actual set, advance i */
1342	if (len != 0)
1343		*iout = i;
1344
1345	return pos + 1;
1346}
1347
1348static int user_field_set_string(struct ftrace_event_field *field,
1349				 char *buf, int len, bool colon)
1350{
1351	int pos = 0;
1352
1353	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
1354	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1355	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
1356
1357	if (str_has_prefix(field->type, "struct "))
1358		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
1359
1360	if (colon)
1361		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
1362
1363	return pos + 1;
1364}
1365
1366static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1367{
1368	struct ftrace_event_field *field;
1369	struct list_head *head = &user->fields;
1370	int pos = 0, depth = 0;
1371	const char *str_func;
1372
1373	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1374
1375	list_for_each_entry_reverse(field, head, link) {
1376		if (depth != 0)
1377			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1378
1379		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
1380				field->name, user_field_format(field->type));
1381
1382		depth++;
1383	}
1384
1385	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1386
1387	list_for_each_entry_reverse(field, head, link) {
1388		if (user_field_is_dyn_string(field->type, &str_func))
1389			pos += snprintf(buf + pos, LEN_OR_ZERO,
1390					", %s(%s)", str_func, field->name);
1391		else
1392			pos += snprintf(buf + pos, LEN_OR_ZERO,
1393					", REC->%s", field->name);
1394	}
1395
1396	return pos + 1;
1397}
1398#undef LEN_OR_ZERO
1399
1400static int user_event_create_print_fmt(struct user_event *user)
1401{
1402	char *print_fmt;
1403	int len;
1404
1405	len = user_event_set_print_fmt(user, NULL, 0);
1406
1407	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
1408
1409	if (!print_fmt)
1410		return -ENOMEM;
1411
1412	user_event_set_print_fmt(user, print_fmt, len);
1413
1414	user->call.print_fmt = print_fmt;
1415
1416	return 0;
1417}
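/*
 * A sketch of the output for the documented example
 * "test char[20] msg;unsigned int id": per user_field_format(),
 * neither field is a dynamic string, so the generated print_fmt is
 *
 *	"msg=%s id=%u", REC->msg, REC->id
 */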
1418
1419static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
1420						int flags,
1421						struct trace_event *event)
1422{
1423	return print_event_fields(iter, event);
1424}
1425
1426static struct trace_event_functions user_event_funcs = {
1427	.trace = user_event_print_trace,
1428};
1429
1430static int user_event_set_call_visible(struct user_event *user, bool visible)
1431{
1432	int ret;
1433	const struct cred *old_cred;
1434	struct cred *cred;
1435
1436	cred = prepare_creds();
1437
1438	if (!cred)
1439		return -ENOMEM;
1440
1441	/*
1442	 * While by default tracefs is locked down, systems can be configured
1443	 * to allow user_event files to be less locked down. The extreme case
1444	 * being "other" has read/write access to user_events_data/status.
1445	 *
1446	 * When not locked down, processes may not have permissions to
1447	 * add/remove calls themselves to tracefs. We need to temporarily
1448	 * switch to root file permission to allow for this scenario.
1449	 */
1450	cred->fsuid = GLOBAL_ROOT_UID;
1451
1452	old_cred = override_creds(cred);
1453
1454	if (visible)
1455		ret = trace_add_event_call(&user->call);
1456	else
1457		ret = trace_remove_event_call(&user->call);
1458
1459	revert_creds(old_cred);
1460	put_cred(cred);
1461
1462	return ret;
1463}
1464
1465static int destroy_user_event(struct user_event *user)
1466{
1467	int ret = 0;
1468
1469	lockdep_assert_held(&event_mutex);
1470
1471	/* Must destroy fields before call removal */
1472	user_event_destroy_fields(user);
1473
1474	ret = user_event_set_call_visible(user, false);
1475
1476	if (ret)
1477		return ret;
1478
1479	dyn_event_remove(&user->devent);
1480	hash_del(&user->node);
1481
1482	user_event_destroy_validators(user);
1483	kfree(user->call.print_fmt);
1484	kfree(EVENT_NAME(user));
1485	kfree(user);
1486
1487	if (current_user_events > 0)
1488		current_user_events--;
1489	else
1490		pr_alert("BUG: Bad current_user_events\n");
1491
1492	return ret;
1493}
1494
1495static struct user_event *find_user_event(struct user_event_group *group,
1496					  char *name, u32 *outkey)
1497{
1498	struct user_event *user;
1499	u32 key = user_event_key(name);
1500
1501	*outkey = key;
1502
1503	hash_for_each_possible(group->register_table, user, node, key)
1504		if (!strcmp(EVENT_NAME(user), name))
1505			return user_event_get(user);
1506
1507	return NULL;
1508}
1509
1510static int user_event_validate(struct user_event *user, void *data, int len)
1511{
1512	struct list_head *head = &user->validators;
1513	struct user_event_validator *validator;
1514	void *pos, *end = data + len;
1515	u32 loc, offset, size;
1516
1517	list_for_each_entry(validator, head, user_event_link) {
1518		pos = data + validator->offset;
1519
1520		/* Already done min_size check, no bounds check here */
1521		loc = *(u32 *)pos;
1522		offset = loc & 0xffff;
1523		size = loc >> 16;
1524
1525		if (likely(validator->flags & VALIDATOR_REL))
1526			pos += offset + sizeof(loc);
1527		else
1528			pos = data + offset;
1529
1530		pos += size;
1531
1532		if (unlikely(pos > end))
1533			return -EFAULT;
1534
1535		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1536			if (unlikely(*(char *)(pos - 1) != '\0'))
1537				return -EFAULT;
1538	}
1539
1540	return 0;
1541}
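/*
 * Example with a hypothetical payload: a "__data_loc char[] str" field
 * stores a u32 loc word where (loc & 0xffff) is the offset of the
 * string from the start of the entry and (loc >> 16) is its size.
 * A 5-byte "abcd\0" placed at offset 24 encodes as
 * loc = (5 << 16) | 24, and VALIDATOR_ENSURE_NULL verifies that the
 * byte at offset 24 + 5 - 1 is the terminating NUL.
 */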
1542
1543/*
1544 * Writes the user supplied payload out to a trace file.
1545 */
1546static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1547			      void *tpdata, bool *faulted)
1548{
1549	struct trace_event_file *file;
1550	struct trace_entry *entry;
1551	struct trace_event_buffer event_buffer;
1552	size_t size = sizeof(*entry) + i->count;
1553
1554	file = (struct trace_event_file *)tpdata;
1555
1556	if (!file ||
1557	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
1558	    trace_trigger_soft_disabled(file))
1559		return;
1560
1561	/* Allocates and fills trace_entry; entry + 1 is the data payload */
1562	entry = trace_event_buffer_reserve(&event_buffer, file, size);
1563
1564	if (unlikely(!entry))
1565		return;
1566
1567	if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
1568		goto discard;
1569
1570	if (!list_empty(&user->validators) &&
1571	    unlikely(user_event_validate(user, entry, size)))
1572		goto discard;
1573
1574	trace_event_buffer_commit(&event_buffer);
1575
1576	return;
1577discard:
1578	*faulted = true;
1579	__trace_event_discard_commit(event_buffer.buffer,
1580				     event_buffer.event);
1581}
1582
1583#ifdef CONFIG_PERF_EVENTS
1584/*
1585 * Writes the user supplied payload out to perf ring buffer.
1586 */
1587static void user_event_perf(struct user_event *user, struct iov_iter *i,
1588			    void *tpdata, bool *faulted)
1589{
1590	struct hlist_head *perf_head;
1591
1592	perf_head = this_cpu_ptr(user->call.perf_events);
1593
1594	if (perf_head && !hlist_empty(perf_head)) {
1595		struct trace_entry *perf_entry;
1596		struct pt_regs *regs;
1597		size_t size = sizeof(*perf_entry) + i->count;
1598		int context;
1599
1600		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1601						  &regs, &context);
1602
1603		if (unlikely(!perf_entry))
1604			return;
1605
1606		perf_fetch_caller_regs(regs);
1607
1608		if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
1609			goto discard;
1610
1611		if (!list_empty(&user->validators) &&
1612		    unlikely(user_event_validate(user, perf_entry, size)))
1613			goto discard;
1614
1615		perf_trace_buf_submit(perf_entry, size, context,
1616				      user->call.event.type, 1, regs,
1617				      perf_head, NULL);
1618
1619		return;
1620discard:
1621		*faulted = true;
1622		perf_swevent_put_recursion_context(context);
1623	}
1624}
1625#endif
1626
1627/*
1628 * Update the enabled bit among all user processes.
1629 */
1630static void update_enable_bit_for(struct user_event *user)
1631{
1632	struct tracepoint *tp = &user->tracepoint;
1633	char status = 0;
1634
1635	if (atomic_read(&tp->key.enabled) > 0) {
1636		struct tracepoint_func *probe_func_ptr;
1637		user_event_func_t probe_func;
1638
1639		rcu_read_lock_sched();
1640
1641		probe_func_ptr = rcu_dereference_sched(tp->funcs);
1642
1643		if (probe_func_ptr) {
1644			do {
1645				probe_func = probe_func_ptr->func;
1646
1647				if (probe_func == user_event_ftrace)
1648					status |= EVENT_STATUS_FTRACE;
1649#ifdef CONFIG_PERF_EVENTS
1650				else if (probe_func == user_event_perf)
1651					status |= EVENT_STATUS_PERF;
1652#endif
1653				else
1654					status |= EVENT_STATUS_OTHER;
1655			} while ((++probe_func_ptr)->func);
1656		}
1657
1658		rcu_read_unlock_sched();
1659	}
1660
1661	user->status = status;
1662
1663	user_event_enabler_update(user);
1664}
1665
1666/*
1667 * Register callback for our events from tracing sub-systems.
1668 */
1669static int user_event_reg(struct trace_event_call *call,
1670			  enum trace_reg type,
1671			  void *data)
1672{
1673	struct user_event *user = (struct user_event *)call->data;
1674	int ret = 0;
1675
1676	if (!user)
1677		return -ENOENT;
1678
1679	switch (type) {
1680	case TRACE_REG_REGISTER:
1681		ret = tracepoint_probe_register(call->tp,
1682						call->class->probe,
1683						data);
1684		if (!ret)
1685			goto inc;
1686		break;
1687
1688	case TRACE_REG_UNREGISTER:
1689		tracepoint_probe_unregister(call->tp,
1690					    call->class->probe,
1691					    data);
1692		goto dec;
1693
1694#ifdef CONFIG_PERF_EVENTS
1695	case TRACE_REG_PERF_REGISTER:
1696		ret = tracepoint_probe_register(call->tp,
1697						call->class->perf_probe,
1698						data);
1699		if (!ret)
1700			goto inc;
1701		break;
1702
1703	case TRACE_REG_PERF_UNREGISTER:
1704		tracepoint_probe_unregister(call->tp,
1705					    call->class->perf_probe,
1706					    data);
1707		goto dec;
1708
1709	case TRACE_REG_PERF_OPEN:
1710	case TRACE_REG_PERF_CLOSE:
1711	case TRACE_REG_PERF_ADD:
1712	case TRACE_REG_PERF_DEL:
1713		break;
1714#endif
1715	}
1716
1717	return ret;
1718inc:
1719	user_event_get(user);
1720	update_enable_bit_for(user);
1721	return 0;
1722dec:
1723	update_enable_bit_for(user);
1724	user_event_put(user, true);
1725	return 0;
1726}
1727
1728static int user_event_create(const char *raw_command)
1729{
1730	struct user_event_group *group;
1731	struct user_event *user;
1732	char *name;
1733	int ret;
1734
1735	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1736		return -ECANCELED;
1737
1738	raw_command += USER_EVENTS_PREFIX_LEN;
1739	raw_command = skip_spaces(raw_command);
1740
1741	name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1742
1743	if (!name)
1744		return -ENOMEM;
1745
1746	group = current_user_event_group();
1747
1748	if (!group) {
1749		kfree(name);
1750		return -ENOENT;
1751	}
1752
1753	mutex_lock(&group->reg_mutex);
1754
1755	/* Dyn events persist, otherwise they would cleanup immediately */
1756	ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1757
1758	if (!ret)
1759		user_event_put(user, false);
1760
1761	mutex_unlock(&group->reg_mutex);
1762
1763	if (ret)
1764		kfree(name);
1765
1766	return ret;
1767}
1768
1769static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1770{
1771	struct user_event *user = container_of(ev, struct user_event, devent);
1772	struct ftrace_event_field *field;
1773	struct list_head *head;
1774	int depth = 0;
1775
1776	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1777
1778	head = trace_get_fields(&user->call);
1779
1780	list_for_each_entry_reverse(field, head, link) {
1781		if (depth == 0)
1782			seq_puts(m, " ");
1783		else
1784			seq_puts(m, "; ");
1785
1786		seq_printf(m, "%s %s", field->type, field->name);
1787
1788		if (str_has_prefix(field->type, "struct "))
1789			seq_printf(m, " %d", field->size);
1790
1791		depth++;
1792	}
1793
1794	seq_puts(m, "\n");
1795
1796	return 0;
1797}
1798
1799static bool user_event_is_busy(struct dyn_event *ev)
1800{
1801	struct user_event *user = container_of(ev, struct user_event, devent);
1802
1803	return !user_event_last_ref(user);
1804}
1805
1806static int user_event_free(struct dyn_event *ev)
1807{
1808	struct user_event *user = container_of(ev, struct user_event, devent);
1809
1810	if (!user_event_last_ref(user))
1811		return -EBUSY;
1812
1813	if (!user_event_capable(user->reg_flags))
1814		return -EPERM;
1815
1816	return destroy_user_event(user);
1817}
1818
1819static bool user_field_match(struct ftrace_event_field *field, int argc,
1820			     const char **argv, int *iout)
1821{
1822	char *field_name = NULL, *dyn_field_name = NULL;
1823	bool colon = false, match = false;
1824	int dyn_len, len;
1825
1826	if (*iout >= argc)
1827		return false;
1828
1829	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1830					    0, &colon);
1831
1832	len = user_field_set_string(field, field_name, 0, colon);
1833
1834	if (dyn_len != len)
1835		return false;
1836
1837	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1838	field_name = kmalloc(len, GFP_KERNEL);
1839
1840	if (!dyn_field_name || !field_name)
1841		goto out;
1842
1843	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1844				  dyn_len, &colon);
1845
1846	user_field_set_string(field, field_name, len, colon);
1847
1848	match = strcmp(dyn_field_name, field_name) == 0;
1849out:
1850	kfree(dyn_field_name);
1851	kfree(field_name);
1852
1853	return match;
1854}
1855
1856static bool user_fields_match(struct user_event *user, int argc,
1857			      const char **argv)
1858{
1859	struct ftrace_event_field *field;
1860	struct list_head *head = &user->fields;
1861	int i = 0;
1862
1863	list_for_each_entry_reverse(field, head, link) {
1864		if (!user_field_match(field, argc, argv, &i))
1865			return false;
1866	}
1867
1868	if (i != argc)
1869		return false;
1870
1871	return true;
1872}
1873
1874static bool user_event_match(const char *system, const char *event,
1875			     int argc, const char **argv, struct dyn_event *ev)
1876{
1877	struct user_event *user = container_of(ev, struct user_event, devent);
1878	bool match;
1879
1880	match = strcmp(EVENT_NAME(user), event) == 0 &&
1881		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1882
1883	if (match && argc > 0)
1884		match = user_fields_match(user, argc, argv);
1885	else if (match && argc == 0)
1886		match = list_empty(&user->fields);
1887
1888	return match;
1889}
1890
1891static struct dyn_event_operations user_event_dops = {
1892	.create = user_event_create,
1893	.show = user_event_show,
1894	.is_busy = user_event_is_busy,
1895	.free = user_event_free,
1896	.match = user_event_match,
1897};
1898
1899static int user_event_trace_register(struct user_event *user)
1900{
1901	int ret;
1902
1903	ret = register_trace_event(&user->call.event);
1904
1905	if (!ret)
1906		return -ENODEV;
1907
1908	ret = user_event_set_call_visible(user, true);
1909
1910	if (ret)
1911		unregister_trace_event(&user->call.event);
1912
1913	return ret;
1914}
1915
1916/*
1917 * Parses the event name, arguments and flags then registers if successful.
1918 * The name buffer lifetime is owned by this method for success cases only.
1919 * Upon success the returned user_event has its ref count increased by 1.
1920 */
1921static int user_event_parse(struct user_event_group *group, char *name,
1922			    char *args, char *flags,
1923			    struct user_event **newuser, int reg_flags)
1924{
1925	int ret;
1926	u32 key;
1927	struct user_event *user;
1928	int argc = 0;
1929	char **argv;
1930
1931	/* Currently don't support any text-based flags */
1932	if (flags != NULL)
1933		return -EINVAL;
1934
1935	if (!user_event_capable(reg_flags))
1936		return -EPERM;
1937
1938	/* Prevent dyn_event from racing */
1939	mutex_lock(&event_mutex);
1940	user = find_user_event(group, name, &key);
1941	mutex_unlock(&event_mutex);
1942
1943	if (user) {
1944		if (args) {
1945			argv = argv_split(GFP_KERNEL, args, &argc);
1946			if (!argv) {
1947				ret = -ENOMEM;
1948				goto error;
1949			}
1950
1951			ret = user_fields_match(user, argc, (const char **)argv);
1952			argv_free(argv);
1953
1954		} else
1955			ret = list_empty(&user->fields);
1956
1957		if (ret) {
1958			*newuser = user;
1959			/*
1960			 * Name is allocated by the caller; free it since the event
1961			 * already exists. The caller only frees the name on failure.
1962			 */
1963			kfree(name);
1964		} else {
1965			ret = -EADDRINUSE;
1966			goto error;
1967		}
1968
1969		return 0;
1970error:
1971		user_event_put(user, false);
1972		return ret;
1973	}
1974
1975	user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1976
1977	if (!user)
1978		return -ENOMEM;
1979
1980	INIT_LIST_HEAD(&user->class.fields);
1981	INIT_LIST_HEAD(&user->fields);
1982	INIT_LIST_HEAD(&user->validators);
1983
1984	user->group = group;
1985	user->tracepoint.name = name;
1986
1987	ret = user_event_parse_fields(user, args);
1988
1989	if (ret)
1990		goto put_user;
1991
1992	ret = user_event_create_print_fmt(user);
1993
1994	if (ret)
1995		goto put_user;
1996
1997	user->call.data = user;
1998	user->call.class = &user->class;
1999	user->call.name = name;
2000	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
2001	user->call.tp = &user->tracepoint;
2002	user->call.event.funcs = &user_event_funcs;
2003	user->class.system = group->system_name;
2004
2005	user->class.fields_array = user_event_fields_array;
2006	user->class.get_fields = user_event_get_fields;
2007	user->class.reg = user_event_reg;
2008	user->class.probe = user_event_ftrace;
2009#ifdef CONFIG_PERF_EVENTS
2010	user->class.perf_probe = user_event_perf;
2011#endif
2012
2013	mutex_lock(&event_mutex);
2014
2015	if (current_user_events >= max_user_events) {
2016		ret = -EMFILE;
2017		goto put_user_lock;
2018	}
2019
2020	ret = user_event_trace_register(user);
2021
2022	if (ret)
2023		goto put_user_lock;
2024
2025	user->reg_flags = reg_flags;
2026
2027	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
2028		/* Ensure we track self ref and caller ref (2) */
2029		refcount_set(&user->refcnt, 2);
2030	} else {
2031		/* Ensure we track only caller ref (1) */
2032		refcount_set(&user->refcnt, 1);
2033	}
2034
2035	dyn_event_init(&user->devent, &user_event_dops);
2036	dyn_event_add(&user->devent, &user->call);
2037	hash_add(group->register_table, &user->node, key);
2038	current_user_events++;
2039
2040	mutex_unlock(&event_mutex);
2041
2042	*newuser = user;
2043	return 0;
2044put_user_lock:
2045	mutex_unlock(&event_mutex);
2046put_user:
2047	user_event_destroy_fields(user);
2048	user_event_destroy_validators(user);
2049	kfree(user->call.print_fmt);
2050	kfree(user);
2051	return ret;
2052}
2053
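/*
 * A sketch of the textual form parsed above, per the command format in
 * Documentation/trace/user_events.rst: an event name followed by optional
 * semicolon-separated "type name" field pairs, e.g. split from a register
 * string such as (names illustrative only):
 *
 *	test_event u32 count; char[20] msg
 */
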
2054/*
2055 * Deletes a previously created event if it is no longer being used.
2056 */
2057static int delete_user_event(struct user_event_group *group, char *name)
2058{
2059	u32 key;
2060	struct user_event *user = find_user_event(group, name, &key);
2061
2062	if (!user)
2063		return -ENOENT;
2064
2065	user_event_put(user, true);
2066
2067	if (!user_event_last_ref(user))
2068		return -EBUSY;
2069
2070	if (!user_event_capable(user->reg_flags))
2071		return -EPERM;
2072
2073	return destroy_user_event(user);
2074}
2075
2076/*
2077 * Validates the user payload and delivers it to attached probes via iterator.
2078 */
2079static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
2080{
2081	struct user_event_file_info *info = file->private_data;
2082	struct user_event_refs *refs;
2083	struct user_event *user = NULL;
2084	struct tracepoint *tp;
2085	ssize_t ret = i->count;
2086	int idx;
2087
2088	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
2089		return -EFAULT;
2090
2091	if (idx < 0)
2092		return -EINVAL;
2093
2094	rcu_read_lock_sched();
2095
2096	refs = rcu_dereference_sched(info->refs);
2097
2098	/*
2099	 * The refs->events array is protected by RCU, and new items may be
2100	 * added. But the user retrieved by indexing into the events array
2101	 * remains immutable while the file is open.
2102	 */
2103	if (likely(refs && idx < refs->count))
2104		user = refs->events[idx];
2105
2106	rcu_read_unlock_sched();
2107
2108	if (unlikely(user == NULL))
2109		return -ENOENT;
2110
2111	if (unlikely(i->count < user->min_size))
2112		return -EINVAL;
2113
2114	tp = &user->tracepoint;
2115
2116	/*
2117	 * It's possible key.enabled becomes disabled after this check;
2118	 * however, we don't mind if a few extra events slip through here.
2119	 */
2120	if (likely(atomic_read(&tp->key.enabled) > 0)) {
2121		struct tracepoint_func *probe_func_ptr;
2122		user_event_func_t probe_func;
2123		struct iov_iter copy;
2124		void *tpdata;
2125		bool faulted;
2126
2127		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
2128			return -EFAULT;
2129
2130		faulted = false;
2131
2132		rcu_read_lock_sched();
2133
2134		probe_func_ptr = rcu_dereference_sched(tp->funcs);
2135
2136		if (probe_func_ptr) {
2137			do {
2138				copy = *i;
2139				probe_func = probe_func_ptr->func;
2140				tpdata = probe_func_ptr->data;
2141				probe_func(user, &copy, tpdata, &faulted);
2142			} while ((++probe_func_ptr)->func);
2143		}
2144
2145		rcu_read_unlock_sched();
2146
2147		if (unlikely(faulted))
2148			return -EFAULT;
2149	} else
2150		return -EBADF;
2151
2152	return ret;
2153}
2154
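/*
 * A minimal user-mode sketch of the payload layout consumed above: the
 * first sizeof(int) bytes select the write index returned by DIAG_IOCSREG,
 * followed by the event fields. All names below are hypothetical:
 *
 *	struct { int write_index; __u32 count; } payload = {
 *		.write_index = reg.write_index,
 *		.count = 1,
 *	};
 *
 *	if (write(data_fd, &payload, sizeof(payload)) < 0)
 *		perror("write");
 */
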
2155static int user_events_open(struct inode *node, struct file *file)
2156{
2157	struct user_event_group *group;
2158	struct user_event_file_info *info;
2159
2160	group = current_user_event_group();
2161
2162	if (!group)
2163		return -ENOENT;
2164
2165	info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
2166
2167	if (!info)
2168		return -ENOMEM;
2169
2170	info->group = group;
2171
2172	file->private_data = info;
2173
2174	return 0;
2175}
2176
2177static ssize_t user_events_write(struct file *file, const char __user *ubuf,
2178				 size_t count, loff_t *ppos)
2179{
2180	struct iov_iter i;
2181
2182	if (unlikely(*ppos != 0))
2183		return -EFAULT;
2184
2185	if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i)))
2186		return -EFAULT;
2187
2188	return user_events_write_core(file, &i);
2189}
2190
2191static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
2192{
2193	return user_events_write_core(kp->ki_filp, i);
2194}
2195
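/*
 * Adds a reference from this file to the given user_event. Returns the
 * existing index if the event is already referenced, otherwise publishes
 * a grown copy of the array via RCU and returns the new index. Negative
 * values indicate an error.
 */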
2196static int user_events_ref_add(struct user_event_file_info *info,
2197			       struct user_event *user)
2198{
2199	struct user_event_group *group = info->group;
2200	struct user_event_refs *refs, *new_refs;
2201	int i, size, count = 0;
2202
2203	refs = rcu_dereference_protected(info->refs,
2204					 lockdep_is_held(&group->reg_mutex));
2205
2206	if (refs) {
2207		count = refs->count;
2208
2209		for (i = 0; i < count; ++i)
2210			if (refs->events[i] == user)
2211				return i;
2212	}
2213
2214	size = struct_size(refs, events, count + 1);
2215
2216	new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
2217
2218	if (!new_refs)
2219		return -ENOMEM;
2220
2221	new_refs->count = count + 1;
2222
2223	for (i = 0; i < count; ++i)
2224		new_refs->events[i] = refs->events[i];
2225
2226	new_refs->events[i] = user_event_get(user);
2227
2228	rcu_assign_pointer(info->refs, new_refs);
2229
2230	if (refs)
2231		kfree_rcu(refs, rcu);
2232
2233	return i;
2234}
2235
2236static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
2237{
2238	u32 size;
2239	long ret;
2240
2241	ret = get_user(size, &ureg->size);
2242
2243	if (ret)
2244		return ret;
2245
2246	if (size > PAGE_SIZE)
2247		return -E2BIG;
2248
2249	if (size < offsetofend(struct user_reg, write_index))
2250		return -EINVAL;
2251
2252	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2253
2254	if (ret)
2255		return ret;
2256
2257	/* Ensure only valid flags */
2258	if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
2259		return -EINVAL;
2260
2261	/* Ensure supported size */
2262	switch (kreg->enable_size) {
2263	case 4:
2264		/* 32-bit */
2265		break;
2266#if BITS_PER_LONG >= 64
2267	case 8:
2268		/* 64-bit */
2269		break;
2270#endif
2271	default:
2272		return -EINVAL;
2273	}
2274
2275	/* Ensure natural alignment */
2276	if (kreg->enable_addr % kreg->enable_size)
2277		return -EINVAL;
2278
2279	/* Ensure bit range for size */
2280	if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2281		return -EINVAL;
2282
2283	/* Ensure accessible */
2284	if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2285		       kreg->enable_size))
2286		return -EFAULT;
2287
2288	kreg->size = size;
2289
2290	return 0;
2291}
2292
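/*
 * A user-mode sketch of a struct user_reg that satisfies the checks above
 * (field names per include/uapi/linux/user_events.h). The enable variable
 * and event string are illustrative; enable_size must be 4 (or 8 on
 * 64-bit), enable_addr naturally aligned and enable_bit within the word:
 *
 *	int enabled = 0;
 *	struct user_reg reg = {0};
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;
 *	reg.enable_size = sizeof(enabled);
 *	reg.enable_addr = (__u64)(uintptr_t)&enabled;
 *	reg.name_args = (__u64)(uintptr_t)"test_event u32 count";
 *
 *	if (ioctl(data_fd, DIAG_IOCSREG, &reg) == 0)
 *		idx = reg.write_index;
 */
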
2293/*
2294 * Registers a user_event on behalf of a user process.
2295 */
2296static long user_events_ioctl_reg(struct user_event_file_info *info,
2297				  unsigned long uarg)
2298{
2299	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2300	struct user_reg reg;
2301	struct user_event *user;
2302	struct user_event_enabler *enabler;
2303	char *name;
2304	long ret;
2305	int write_result;
2306
2307	ret = user_reg_get(ureg, &reg);
2308
2309	if (ret)
2310		return ret;
2311
2312	/*
2313	 * Prevent users from using the same address and bit multiple times
2314	 * within the same mm address space. Doing so can cause unexpected
2315	 * behavior for user processes that is far easier to debug if it is
2316	 * explicitly an error upon registering.
2317	 */
2318	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2319					      reg.enable_bit))
2320		return -EADDRINUSE;
2321
2322	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2323			    MAX_EVENT_DESC);
2324
2325	if (IS_ERR(name)) {
2326		ret = PTR_ERR(name);
2327		return ret;
2328	}
2329
2330	ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2331
2332	if (ret) {
2333		kfree(name);
2334		return ret;
2335	}
2336
2337	ret = user_events_ref_add(info, user);
2338
2339	/* The parse ref is no longer needed; ref_add holds its own ref on success */
2340	user_event_put(user, false);
2341
2342	/* A non-negative value is a valid write index */
2343	if (ret < 0)
2344		return ret;
2345
2346	/*
2347	 * user_events_ref_add succeeded:
2348	 * At this point we have a user_event whose lifetime is bound by the
2349	 * reference count, not this file. If anything fails, the user_event
2350	 * still has a reference until the file is released. During release
2351	 * any remaining references (from user_events_ref_add) are decremented.
2352	 *
2353	 * Attempt to create an enabler, whose lifetime is tied to the event
2354	 * in the same way. Once the task that caused the enabler to be
2355	 * created exits or issues exec(), the enablers it has created
2356	 * will be destroyed and the ref to the event will be decremented.
2357	 */
2358	enabler = user_event_enabler_create(&reg, user, &write_result);
2359
2360	if (!enabler)
2361		return -ENOMEM;
2362
2363	/* Write failed/faulted, give error back to caller */
2364	if (write_result)
2365		return write_result;
2366
2367	put_user((u32)ret, &ureg->write_index);
2368
2369	return 0;
2370}
2371
2372/*
2373 * Deletes a user_event on behalf of a user process.
2374 */
2375static long user_events_ioctl_del(struct user_event_file_info *info,
2376				  unsigned long uarg)
2377{
2378	void __user *ubuf = (void __user *)uarg;
2379	char *name;
2380	long ret;
2381
2382	name = strndup_user(ubuf, MAX_EVENT_DESC);
2383
2384	if (IS_ERR(name))
2385		return PTR_ERR(name);
2386
2387	/* event_mutex prevents dyn_event from racing */
2388	mutex_lock(&event_mutex);
2389	ret = delete_user_event(info->group, name);
2390	mutex_unlock(&event_mutex);
2391
2392	kfree(name);
2393
2394	return ret;
2395}
2396
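/*
 * Illustrative user-mode deletion by name, mirroring the ioctl above
 * (event name hypothetical):
 *
 *	ioctl(data_fd, DIAG_IOCSDEL, "test_event");
 */
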
2397static long user_unreg_get(struct user_unreg __user *ureg,
2398			   struct user_unreg *kreg)
2399{
2400	u32 size;
2401	long ret;
2402
2403	ret = get_user(size, &ureg->size);
2404
2405	if (ret)
2406		return ret;
2407
2408	if (size > PAGE_SIZE)
2409		return -E2BIG;
2410
2411	if (size < offsetofend(struct user_unreg, disable_addr))
2412		return -EINVAL;
2413
2414	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2415
2416	/* Ensure no reserved values, since we don't support any yet */
2417	if (kreg->__reserved || kreg->__reserved2)
2418		return -EINVAL;
2419
2420	return ret;
2421}
2422
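/*
 * Forcibly clears an enable bit in the user mm via a stack-local,
 * eventless enabler, retrying after faulting the page in as needed.
 */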
2423static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
2424				   unsigned long uaddr, unsigned char bit,
2425				   unsigned long flags)
2426{
2427	struct user_event_enabler enabler;
2428	int result;
2429	int attempt = 0;
2430
2431	memset(&enabler, 0, sizeof(enabler));
2432	enabler.addr = uaddr;
2433	enabler.values = bit | flags;
2434retry:
2435	/* Prevents state changes from racing with new enablers */
2436	mutex_lock(&event_mutex);
2437
2438	/* Force the bit to be cleared, since no event is attached */
2439	mmap_read_lock(user_mm->mm);
2440	result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2441	mmap_read_unlock(user_mm->mm);
2442
2443	mutex_unlock(&event_mutex);
2444
2445	if (result) {
2446		/* Attempt to fault the page in and retry if that worked */
2447		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2448			goto retry;
2449	}
2450
2451	return result;
2452}
2453
2454/*
2455 * Unregisters an enablement address/bit within a task/user mm.
2456 */
2457static long user_events_ioctl_unreg(unsigned long uarg)
2458{
2459	struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2460	struct user_event_mm *mm = current->user_event_mm;
2461	struct user_event_enabler *enabler, *next;
2462	struct user_unreg reg;
2463	unsigned long flags;
2464	long ret;
2465
2466	ret = user_unreg_get(ureg, &reg);
2467
2468	if (ret)
2469		return ret;
2470
2471	if (!mm)
2472		return -ENOENT;
2473
2474	flags = 0;
2475	ret = -ENOENT;
2476
2477	/*
2478	 * The freeing and faulting flags indicate whether the enabler is in
2479	 * use at all. When faulting is set, a page fault is being handled
2480	 * asynchronously; if freeing is also set, the enabler is destroyed
2481	 * once that fault completes. If no async fault is in flight, we can
2482	 * destroy it now since we hold the event_mutex during these checks.
2483	 */
2484	mutex_lock(&event_mutex);
2485
2486	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
2487		if (enabler->addr == reg.disable_addr &&
2488		    ENABLE_BIT(enabler) == reg.disable_bit) {
2489			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2490
2491			/* We must keep compat flags for the clear */
2492			flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
2493
2494			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2495				user_event_enabler_destroy(enabler, true);
2496
2497			/* Removed at least one */
2498			ret = 0;
2499		}
2500	}
2501
2502	mutex_unlock(&event_mutex);
2503
2504	/* Ensure bit is now cleared for user, regardless of event status */
2505	if (!ret)
2506		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2507					      reg.disable_bit, flags);
2508
2509	return ret;
2510}
2511
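/*
 * A user-mode sketch of the matching unregister call (field names per
 * include/uapi/linux/user_events.h, values illustrative and assumed to
 * match a prior DIAG_IOCSREG):
 *
 *	struct user_unreg unreg = {0};
 *
 *	unreg.size = sizeof(unreg);
 *	unreg.disable_bit = 31;
 *	unreg.disable_addr = (__u64)(uintptr_t)&enabled;
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */
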
2512/*
2513 * Handles ioctls from user mode to register, delete or unregister events.
2514 */
2515static long user_events_ioctl(struct file *file, unsigned int cmd,
2516			      unsigned long uarg)
2517{
2518	struct user_event_file_info *info = file->private_data;
2519	struct user_event_group *group = info->group;
2520	long ret = -ENOTTY;
2521
2522	switch (cmd) {
2523	case DIAG_IOCSREG:
2524		mutex_lock(&group->reg_mutex);
2525		ret = user_events_ioctl_reg(info, uarg);
2526		mutex_unlock(&group->reg_mutex);
2527		break;
2528
2529	case DIAG_IOCSDEL:
2530		mutex_lock(&group->reg_mutex);
2531		ret = user_events_ioctl_del(info, uarg);
2532		mutex_unlock(&group->reg_mutex);
2533		break;
2534
2535	case DIAG_IOCSUNREG:
2536		mutex_lock(&group->reg_mutex);
2537		ret = user_events_ioctl_unreg(uarg);
2538		mutex_unlock(&group->reg_mutex);
2539		break;
2540	}
2541
2542	return ret;
2543}
2544
2545/*
2546 * Handles the final close of the file from user mode.
2547 */
2548static int user_events_release(struct inode *node, struct file *file)
2549{
2550	struct user_event_file_info *info = file->private_data;
2551	struct user_event_group *group;
2552	struct user_event_refs *refs;
2553	int i;
2554
2555	if (!info)
2556		return -EINVAL;
2557
2558	group = info->group;
2559
2560	/*
2561	 * Ensure refs cannot change underneath us by holding the
2562	 * register mutex during the final freeing of the references.
2563	 */
2564	mutex_lock(&group->reg_mutex);
2565
2566	refs = info->refs;
2567
2568	if (!refs)
2569		goto out;
2570
2571	/*
2572	 * The lifetime of refs, which is tied to this file, has ended.
2573	 * The underlying user_events are ref counted and cannot be freed
2574	 * while referenced; after this decrement they may be freed elsewhere.
2575	 */
2576	for (i = 0; i < refs->count; ++i)
2577		user_event_put(refs->events[i], false);
2578
2579out:
2580	file->private_data = NULL;
2581
2582	mutex_unlock(&group->reg_mutex);
2583
2584	kfree(refs);
2585	kfree(info);
2586
2587	return 0;
2588}
2589
2590static const struct file_operations user_data_fops = {
2591	.open		= user_events_open,
2592	.write		= user_events_write,
2593	.write_iter	= user_events_write_iter,
2594	.unlocked_ioctl	= user_events_ioctl,
2595	.release	= user_events_release,
2596};
2597
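/*
 * The status file renders as a single record: start() yields one token
 * for position zero and next() immediately ends the iteration, so show()
 * runs exactly once per read of the file.
 */
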
2598static void *user_seq_start(struct seq_file *m, loff_t *pos)
2599{
2600	if (*pos)
2601		return NULL;
2602
2603	return (void *)1;
2604}
2605
2606static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2607{
2608	++*pos;
2609	return NULL;
2610}
2611
2612static void user_seq_stop(struct seq_file *m, void *p)
2613{
2614}
2615
2616static int user_seq_show(struct seq_file *m, void *p)
2617{
2618	struct user_event_group *group = m->private;
2619	struct user_event *user;
2620	char status;
2621	int i, active = 0, busy = 0;
2622
2623	if (!group)
2624		return -EINVAL;
2625
2626	mutex_lock(&group->reg_mutex);
2627
2628	hash_for_each(group->register_table, i, user, node) {
2629		status = user->status;
2630
2631		seq_printf(m, "%s", EVENT_NAME(user));
2632
2633		if (status != 0)
2634			seq_puts(m, " #");
2635
2636		if (status != 0) {
2637			seq_puts(m, " Used by");
2638			if (status & EVENT_STATUS_FTRACE)
2639				seq_puts(m, " ftrace");
2640			if (status & EVENT_STATUS_PERF)
2641				seq_puts(m, " perf");
2642			if (status & EVENT_STATUS_OTHER)
2643				seq_puts(m, " other");
2644			busy++;
2645		}
2646
2647		seq_puts(m, "\n");
2648		active++;
2649	}
2650
2651	mutex_unlock(&group->reg_mutex);
2652
2653	seq_puts(m, "\n");
2654	seq_printf(m, "Active: %d\n", active);
2655	seq_printf(m, "Busy: %d\n", busy);
2656
2657	return 0;
2658}
2659
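/*
 * Illustrative status file output given one event attached to ftrace
 * (event name hypothetical):
 *
 *	test_event # Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 */
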
2660static const struct seq_operations user_seq_ops = {
2661	.start	= user_seq_start,
2662	.next	= user_seq_next,
2663	.stop	= user_seq_stop,
2664	.show	= user_seq_show,
2665};
2666
2667static int user_status_open(struct inode *node, struct file *file)
2668{
2669	struct user_event_group *group;
2670	int ret;
2671
2672	group = current_user_event_group();
2673
2674	if (!group)
2675		return -ENOENT;
2676
2677	ret = seq_open(file, &user_seq_ops);
2678
2679	if (!ret) {
2680		/* Chain group to seq_file */
2681		struct seq_file *m = file->private_data;
2682
2683		m->private = group;
2684	}
2685
2686	return ret;
2687}
2688
2689static const struct file_operations user_status_fops = {
2690	.open		= user_status_open,
2691	.read		= seq_read,
2692	.llseek		= seq_lseek,
2693	.release	= seq_release,
2694};
2695
2696/*
2697 * Creates a set of tracefs files to allow user mode interactions.
2698 */
2699static int create_user_tracefs(void)
2700{
2701	struct dentry *edata, *estatus;
2702
2703	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2704				    NULL, NULL, &user_data_fops);
2705
2706	if (!edata) {
2707		pr_warn("Could not create tracefs 'user_events_data' entry\n");
2708		goto err;
2709	}
2710
2711	estatus = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2712				      NULL, NULL, &user_status_fops);
2713
2714	if (!estatus) {
2715		tracefs_remove(edata);
2716		pr_warn("Could not create tracefs 'user_events_status' entry\n");
2717		goto err;
2718	}
2719
2720	return 0;
2721err:
2722	return -ENODEV;
2723}
2724
2725static int set_max_user_events_sysctl(struct ctl_table *table, int write,
2726				      void *buffer, size_t *lenp, loff_t *ppos)
2727{
2728	int ret;
2729
2730	mutex_lock(&event_mutex);
2731
2732	ret = proc_douintvec(table, write, buffer, lenp, ppos);
2733
2734	mutex_unlock(&event_mutex);
2735
2736	return ret;
2737}
2738
2739static struct ctl_table user_event_sysctls[] = {
2740	{
2741		.procname	= "user_events_max",
2742		.data		= &max_user_events,
2743		.maxlen		= sizeof(unsigned int),
2744		.mode		= 0644,
2745		.proc_handler	= set_max_user_events_sysctl,
2746	},
2747	{}
2748};
2749
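/*
 * Registered under "kernel", so the limit is tunable as, e.g.:
 *
 *	sysctl kernel.user_events_max=65536
 *
 * The handler above holds event_mutex so the limit cannot change while
 * events are being created or destroyed.
 */
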
2750static int __init trace_events_user_init(void)
2751{
2752	int ret;
2753
2754	fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2755
2756	if (!fault_cache)
2757		return -ENOMEM;
2758
2759	init_group = user_event_group_create();
2760
2761	if (!init_group) {
2762		kmem_cache_destroy(fault_cache);
2763		return -ENOMEM;
2764	}
2765
2766	ret = create_user_tracefs();
2767
2768	if (ret) {
2769		pr_warn("user_events could not register with tracefs\n");
2770		user_event_group_destroy(init_group);
2771		kmem_cache_destroy(fault_cache);
2772		init_group = NULL;
2773		return ret;
2774	}
2775
2776	if (dyn_event_register(&user_event_dops))
2777		pr_warn("user_events could not register with dyn_events\n");
2778
2779	register_sysctl_init("kernel", user_event_sysctls);
2780
2781	return 0;
2782}
2783
2784fs_initcall(trace_events_user_init);