v5.4
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * User-space Probes (UProbes)
   4 *
   5 * Copyright (C) IBM Corporation, 2008-2012
   6 * Authors:
   7 *	Srikar Dronamraju
   8 *	Jim Keniston
   9 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/highmem.h>
  14#include <linux/pagemap.h>	/* read_mapping_page */
  15#include <linux/slab.h>
  16#include <linux/sched.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/coredump.h>
  19#include <linux/export.h>
  20#include <linux/rmap.h>		/* anon_vma_prepare */
  21#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
  22#include <linux/swap.h>		/* try_to_free_swap */
  23#include <linux/ptrace.h>	/* user_enable_single_step */
  24#include <linux/kdebug.h>	/* notifier mechanism */
  25#include "../../mm/internal.h"	/* munlock_vma_page */
  26#include <linux/percpu-rwsem.h>
  27#include <linux/task_work.h>
  28#include <linux/shmem_fs.h>
  29#include <linux/khugepaged.h>
  30
  31#include <linux/uprobes.h>
  32
  33#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
  34#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
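/*
 * Worked example of the arithmetic above (illustrative numbers only): with
 * 4 KiB pages and an arch-defined UPROBE_XOL_SLOT_BYTES of, say, 128,
 * UINSNS_PER_PAGE = 4096 / 128 = 32, so one XOL page provides 32
 * out-of-line instruction slots.
 */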
  35
  36static struct rb_root uprobes_tree = RB_ROOT;
  37/*
  38 * allows us to skip the uprobe_mmap if there are no uprobe events active
  39 * at this time.  Probably a fine grained per inode count is better?
  40 */
  41#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
  42
  43static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
  44
  45#define UPROBES_HASH_SZ	13
  46/* serialize uprobe->pending_list */
  47static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  48#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
  49
  50DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
  51
  52/* Have a copy of original instruction */
  53#define UPROBE_COPY_INSN	0
  54
  55struct uprobe {
  56	struct rb_node		rb_node;	/* node in the rb tree */
  57	refcount_t		ref;
  58	struct rw_semaphore	register_rwsem;
  59	struct rw_semaphore	consumer_rwsem;
  60	struct list_head	pending_list;
  61	struct uprobe_consumer	*consumers;
  62	struct inode		*inode;		/* Also hold a ref to inode */
  63	loff_t			offset;
  64	loff_t			ref_ctr_offset;
  65	unsigned long		flags;
  66
  67	/*
  68	 * The generic code assumes that it has two members of unknown type
  69	 * owned by the arch-specific code:
  70	 *
  71	 * 	insn -	copy_insn() saves the original instruction here for
  72	 *		arch_uprobe_analyze_insn().
  73	 *
  74	 *	ixol -	potentially modified instruction to execute out of
  75	 *		line, copied to xol_area by xol_get_insn_slot().
  76	 */
  77	struct arch_uprobe	arch;
  78};
  79
  80struct delayed_uprobe {
  81	struct list_head list;
  82	struct uprobe *uprobe;
  83	struct mm_struct *mm;
  84};
  85
  86static DEFINE_MUTEX(delayed_uprobe_lock);
  87static LIST_HEAD(delayed_uprobe_list);
  88
  89/*
  90 * Execute out of line area: anonymous executable mapping installed
  91 * by the probed task to execute the copy of the original instruction
  92 * mangled by set_swbp().
  93 *
  94 * On a breakpoint hit, the thread contends for a slot.  It frees the
  95 * slot after singlestep. Currently a fixed number of slots are
  96 * allocated.
  97 */
  98struct xol_area {
  99	wait_queue_head_t 		wq;		/* if all slots are busy */
 100	atomic_t 			slot_count;	/* number of in-use slots */
 101	unsigned long 			*bitmap;	/* 0 = free slot */
 102
 103	struct vm_special_mapping	xol_mapping;
 104	struct page 			*pages[2];
 105	/*
 106	 * We keep the vma's vm_start rather than a pointer to the vma
 107	 * itself.  The probed process or a naughty kernel module could make
 108	 * the vma go away, and we must handle that reasonably gracefully.
 109	 */
 110	unsigned long 			vaddr;		/* Page(s) of instruction slots */
 111};
 112
 113/*
 114 * valid_vma: Verify if the specified vma is an executable vma
 115 * Relax restrictions while unregistering: vm_flags might have
 116 * changed after breakpoint was inserted.
 117 *	- is_register: indicates if we are in register context.
 118 *	- Return 1 if the specified virtual address is in an
 119 *	  executable vma.
 120 */
 121static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 122{
 123	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
 124
 125	if (is_register)
 126		flags |= VM_WRITE;
 127
 128	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
 129}
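/*
 * Example of the check above with typical flag combinations (illustrative):
 * a private text mapping (file-backed, VM_MAYEXEC set, VM_WRITE, VM_MAYSHARE
 * and VM_HUGETLB clear) is valid for both register and unregister; a
 * MAP_SHARED or hugetlb mapping never is; a currently writable private
 * mapping is rejected while registering but accepted while unregistering.
 */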
 130
 131static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
 132{
 133	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 134}
 135
 136static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
 137{
 138	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
 139}
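/*
 * Worked example for the two conversions above (illustrative values): for a
 * vma with vm_start = 0x400000 and vm_pgoff = 1 (i.e. the mapping starts at
 * file offset 0x1000 with 4 KiB pages),
 *	offset_to_vaddr(vma, 0x1234)   = 0x400000 + 0x1234 - 0x1000 = 0x400234
 *	vaddr_to_offset(vma, 0x400234) = 0x1000 + 0x234 = 0x1234
 * so the two helpers are exact inverses of each other.
 */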
 140
 141/**
 142 * __replace_page - replace page in vma by new page.
 143 * based on replace_page in mm/ksm.c
 144 *
 145 * @vma:      vma that holds the pte pointing to page
 146 * @addr:     address the old @page is mapped at
 147 * @old_page: the page we are replacing by new_page
 148 * @new_page: the modified page we replace page by
 149 *
 150 * If @new_page is NULL, only unmap @old_page.
 151 *
 152 * Returns 0 on success, negative error code otherwise.
 153 */
 154static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 155				struct page *old_page, struct page *new_page)
 156{
 157	struct mm_struct *mm = vma->vm_mm;
 158	struct page_vma_mapped_walk pvmw = {
 159		.page = compound_head(old_page),
 160		.vma = vma,
 161		.address = addr,
 162	};
 163	int err;
 164	struct mmu_notifier_range range;
 165	struct mem_cgroup *memcg;
 166
 167	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 168				addr + PAGE_SIZE);
 169
 170	if (new_page) {
 171		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
 172					    &memcg, false);
 173		if (err)
 174			return err;
 175	}
 176
 177	/* For try_to_free_swap() and munlock_vma_page() below */
 178	lock_page(old_page);
 179
 180	mmu_notifier_invalidate_range_start(&range);
 181	err = -EAGAIN;
 182	if (!page_vma_mapped_walk(&pvmw)) {
 183		if (new_page)
 184			mem_cgroup_cancel_charge(new_page, memcg, false);
 185		goto unlock;
 186	}
 187	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 188
 189	if (new_page) {
 190		get_page(new_page);
 191		page_add_new_anon_rmap(new_page, vma, addr, false);
 192		mem_cgroup_commit_charge(new_page, memcg, false, false);
 193		lru_cache_add_active_or_unevictable(new_page, vma);
 194	} else
 195		/* no new page, just dec_mm_counter for old_page */
 196		dec_mm_counter(mm, MM_ANONPAGES);
 197
 198	if (!PageAnon(old_page)) {
 199		dec_mm_counter(mm, mm_counter_file(old_page));
 200		inc_mm_counter(mm, MM_ANONPAGES);
 201	}
 202
 203	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
 204	ptep_clear_flush_notify(vma, addr, pvmw.pte);
 205	if (new_page)
 206		set_pte_at_notify(mm, addr, pvmw.pte,
 207				  mk_pte(new_page, vma->vm_page_prot));
 208
 209	page_remove_rmap(old_page, false);
 210	if (!page_mapped(old_page))
 211		try_to_free_swap(old_page);
 212	page_vma_mapped_walk_done(&pvmw);
 213
 214	if (vma->vm_flags & VM_LOCKED)
 215		munlock_vma_page(old_page);
 216	put_page(old_page);
 217
 218	err = 0;
 219 unlock:
 220	mmu_notifier_invalidate_range_end(&range);
 221	unlock_page(old_page);
 222	return err;
 223}
 224
 225/**
 226 * is_swbp_insn - check if instruction is breakpoint instruction.
 227 * @insn: instruction to be checked.
 228 * Default implementation of is_swbp_insn
 229 * Returns true if @insn is a breakpoint instruction.
 230 */
 231bool __weak is_swbp_insn(uprobe_opcode_t *insn)
 232{
 233	return *insn == UPROBE_SWBP_INSN;
 234}
 235
 236/**
 237 * is_trap_insn - check if instruction is breakpoint instruction.
 238 * @insn: instruction to be checked.
 239 * Default implementation of is_trap_insn
 240 * Returns true if @insn is a breakpoint instruction.
 241 *
 242 * This function is needed for the case where an architecture has multiple
 243 * trap instructions (like powerpc).
 244 */
 245bool __weak is_trap_insn(uprobe_opcode_t *insn)
 246{
 247	return is_swbp_insn(insn);
 248}
 249
 250static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
 251{
 252	void *kaddr = kmap_atomic(page);
 253	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
 254	kunmap_atomic(kaddr);
 255}
 256
 257static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
 258{
 259	void *kaddr = kmap_atomic(page);
 260	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
 261	kunmap_atomic(kaddr);
 262}
 263
 264static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
 265{
 266	uprobe_opcode_t old_opcode;
 267	bool is_swbp;
 268
 269	/*
 270	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
 271	 * We do not check if it is any other 'trap variant' which could
 272	 * be conditional trap instruction such as the one powerpc supports.
 273	 *
 274	 * The logic is that we do not care if the underlying instruction
 275	 * is a trap variant; uprobes always wins over any other (gdb)
 276	 * breakpoint.
 277	 */
 278	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
 279	is_swbp = is_swbp_insn(&old_opcode);
 280
 281	if (is_swbp_insn(new_opcode)) {
 282		if (is_swbp)		/* register: already installed? */
 283			return 0;
 284	} else {
 285		if (!is_swbp)		/* unregister: was it changed by us? */
 286			return 0;
 287	}
 288
 289	return 1;
 290}
 291
 292static struct delayed_uprobe *
 293delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
 294{
 295	struct delayed_uprobe *du;
 296
 297	list_for_each_entry(du, &delayed_uprobe_list, list)
 298		if (du->uprobe == uprobe && du->mm == mm)
 299			return du;
 300	return NULL;
 301}
 302
 303static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
 304{
 305	struct delayed_uprobe *du;
 306
 307	if (delayed_uprobe_check(uprobe, mm))
 308		return 0;
 309
 310	du  = kzalloc(sizeof(*du), GFP_KERNEL);
 311	if (!du)
 312		return -ENOMEM;
 313
 314	du->uprobe = uprobe;
 315	du->mm = mm;
 316	list_add(&du->list, &delayed_uprobe_list);
 317	return 0;
 318}
 319
 320static void delayed_uprobe_delete(struct delayed_uprobe *du)
 321{
 322	if (WARN_ON(!du))
 323		return;
 324	list_del(&du->list);
 325	kfree(du);
 326}
 327
 328static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
 329{
 330	struct list_head *pos, *q;
 331	struct delayed_uprobe *du;
 332
 333	if (!uprobe && !mm)
 334		return;
 335
 336	list_for_each_safe(pos, q, &delayed_uprobe_list) {
 337		du = list_entry(pos, struct delayed_uprobe, list);
 338
 339		if (uprobe && du->uprobe != uprobe)
 340			continue;
 341		if (mm && du->mm != mm)
 342			continue;
 343
 344		delayed_uprobe_delete(du);
 345	}
 346}
 347
 348static bool valid_ref_ctr_vma(struct uprobe *uprobe,
 349			      struct vm_area_struct *vma)
 350{
 351	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
 352
 353	return uprobe->ref_ctr_offset &&
 354		vma->vm_file &&
 355		file_inode(vma->vm_file) == uprobe->inode &&
 356		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
 357		vma->vm_start <= vaddr &&
 358		vma->vm_end > vaddr;
 359}
 360
 361static struct vm_area_struct *
 362find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
 363{
 364	struct vm_area_struct *tmp;
 365
 366	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
 367		if (valid_ref_ctr_vma(uprobe, tmp))
 368			return tmp;
 369
 370	return NULL;
 371}
 372
 373static int
 374__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
 375{
 376	void *kaddr;
 377	struct page *page;
 378	struct vm_area_struct *vma;
 379	int ret;
 380	short *ptr;
 381
 382	if (!vaddr || !d)
 383		return -EINVAL;
 384
 385	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
 386			FOLL_WRITE, &page, &vma, NULL);
 387	if (unlikely(ret <= 0)) {
 388		/*
 389		 * We are asking for 1 page. If get_user_pages_remote() fails,
 390		 * it may return 0, in that case we have to return error.
 391		 */
 392		return ret == 0 ? -EBUSY : ret;
 393	}
 394
 395	kaddr = kmap_atomic(page);
 396	ptr = kaddr + (vaddr & ~PAGE_MASK);
 397
 398	if (unlikely(*ptr + d < 0)) {
 399		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
 400			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
 401		ret = -EINVAL;
 402		goto out;
 403	}
 404
 405	*ptr += d;
 406	ret = 0;
 407out:
 408	kunmap_atomic(kaddr);
 409	put_page(page);
 410	return ret;
 411}
 412
 413static void update_ref_ctr_warn(struct uprobe *uprobe,
 414				struct mm_struct *mm, short d)
 415{
 416	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
 417		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
 418		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
 419		(unsigned long long) uprobe->offset,
 420		(unsigned long long) uprobe->ref_ctr_offset, mm);
 421}
 422
 423static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
 424			  short d)
 425{
 426	struct vm_area_struct *rc_vma;
 427	unsigned long rc_vaddr;
 428	int ret = 0;
 429
 430	rc_vma = find_ref_ctr_vma(uprobe, mm);
 431
 432	if (rc_vma) {
 433		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
 434		ret = __update_ref_ctr(mm, rc_vaddr, d);
 435		if (ret)
 436			update_ref_ctr_warn(uprobe, mm, d);
 437
 438		if (d > 0)
 439			return ret;
 440	}
 441
 442	mutex_lock(&delayed_uprobe_lock);
 443	if (d > 0)
 444		ret = delayed_uprobe_add(uprobe, mm);
 445	else
 446		delayed_uprobe_remove(uprobe, mm);
 447	mutex_unlock(&delayed_uprobe_lock);
 448
 449	return ret;
 450}
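/*
 * What the reference counter above typically guards in user space is an SDT
 * semaphore; a minimal sketch (names and layout are illustrative, not taken
 * from this file):
 *
 *	unsigned short my_probe_semaphore;	// lives at ref_ctr_offset
 *
 *	if (my_probe_semaphore > 0)		// non-zero once update_ref_ctr(.., +1) ran
 *		prepare_and_emit_probe_arguments();
 *
 * Registering the uprobe increments the counter in every mm that maps it
 * (or defers the increment via delayed_uprobe_list); unregistering
 * decrements it.
 */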
 451
 452/*
 453 * NOTE:
 454 * Expect the breakpoint instruction to be the smallest size instruction for
 455 * the architecture. If an arch has variable length instructions and the
 456 * breakpoint instruction is not of the smallest length instruction
 457 * supported by that architecture then we need to modify is_trap_at_addr and
 458 * uprobe_write_opcode accordingly. This would never be a problem for archs
 459 * that have fixed length instructions.
 460 *
 461 * uprobe_write_opcode - write the opcode at a given virtual address.
 462 * @mm: the probed process address space.
 463 * @vaddr: the virtual address to store the opcode.
 464 * @opcode: opcode to be written at @vaddr.
 465 *
 466 * Called with mm->mmap_sem held for write.
 467 * Return 0 (success) or a negative errno.
 468 */
 469int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 470			unsigned long vaddr, uprobe_opcode_t opcode)
 471{
 472	struct uprobe *uprobe;
 473	struct page *old_page, *new_page;
 474	struct vm_area_struct *vma;
 475	int ret, is_register, ref_ctr_updated = 0;
 476	bool orig_page_huge = false;
 477	unsigned int gup_flags = FOLL_FORCE;
 478
 479	is_register = is_swbp_insn(&opcode);
 480	uprobe = container_of(auprobe, struct uprobe, arch);
 481
 482retry:
 483	if (is_register)
 484		gup_flags |= FOLL_SPLIT_PMD;
 485	/* Read the page with vaddr into memory */
 486	ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
 487				    &old_page, &vma, NULL);
 488	if (ret <= 0)
 489		return ret;
 490
 491	ret = verify_opcode(old_page, vaddr, &opcode);
 492	if (ret <= 0)
 493		goto put_old;
 494
 495	if (WARN(!is_register && PageCompound(old_page),
 496		 "uprobe unregister should never work on compound page\n")) {
 497		ret = -EINVAL;
 498		goto put_old;
 499	}
 500
 501	/* We are going to replace instruction, update ref_ctr. */
 502	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
 503		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
 504		if (ret)
 505			goto put_old;
 506
 507		ref_ctr_updated = 1;
 508	}
 509
 510	ret = 0;
 511	if (!is_register && !PageAnon(old_page))
 512		goto put_old;
 513
 514	ret = anon_vma_prepare(vma);
 515	if (ret)
 516		goto put_old;
 517
 518	ret = -ENOMEM;
 519	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 520	if (!new_page)
 521		goto put_old;
 522
 523	__SetPageUptodate(new_page);
 524	copy_highpage(new_page, old_page);
 525	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 526
 527	if (!is_register) {
 528		struct page *orig_page;
 529		pgoff_t index;
 530
 531		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
 532
 533		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
 534		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
 535					  index);
 536
 537		if (orig_page) {
 538			if (PageUptodate(orig_page) &&
 539			    pages_identical(new_page, orig_page)) {
 540				/* let go new_page */
 541				put_page(new_page);
 542				new_page = NULL;
 543
 544				if (PageCompound(orig_page))
 545					orig_page_huge = true;
 546			}
 547			put_page(orig_page);
 548		}
 549	}
 550
 551	ret = __replace_page(vma, vaddr, old_page, new_page);
 552	if (new_page)
 553		put_page(new_page);
 554put_old:
 555	put_page(old_page);
 556
 557	if (unlikely(ret == -EAGAIN))
 558		goto retry;
 559
 560	/* Revert back reference counter if instruction update failed. */
 561	if (ret && is_register && ref_ctr_updated)
 562		update_ref_ctr(uprobe, mm, -1);
 563
 564	/* try collapse pmd for compound page */
 565	if (!ret && orig_page_huge)
 566		collapse_pte_mapped_thp(mm, vaddr);
 567
 568	return ret;
 569}
 570
 571/**
 572 * set_swbp - store breakpoint at a given address.
 573 * @auprobe: arch specific probepoint information.
 574 * @mm: the probed process address space.
 575 * @vaddr: the virtual address to insert the opcode.
 576 *
 577 * For mm @mm, store the breakpoint instruction at @vaddr.
 578 * Return 0 (success) or a negative errno.
 579 */
 580int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 581{
 582	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
 583}
 584
 585/**
 586 * set_orig_insn - Restore the original instruction.
 587 * @mm: the probed process address space.
 588 * @auprobe: arch specific probepoint information.
 589 * @vaddr: the virtual address to insert the opcode.
 590 *
 591 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 592 * Return 0 (success) or a negative errno.
 593 */
 594int __weak
 595set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 596{
 597	return uprobe_write_opcode(auprobe, mm, vaddr,
 598			*(uprobe_opcode_t *)&auprobe->insn);
 599}
 600
 601static struct uprobe *get_uprobe(struct uprobe *uprobe)
 602{
 603	refcount_inc(&uprobe->ref);
 604	return uprobe;
 605}
 606
 607static void put_uprobe(struct uprobe *uprobe)
 608{
 609	if (refcount_dec_and_test(&uprobe->ref)) {
 610		/*
 611		 * If application munmap(exec_vma) before uprobe_unregister()
 612		 * gets called, we don't get a chance to remove uprobe from
 613		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
 614		 */
 615		mutex_lock(&delayed_uprobe_lock);
 616		delayed_uprobe_remove(uprobe, NULL);
 617		mutex_unlock(&delayed_uprobe_lock);
 618		kfree(uprobe);
 619	}
 620}
 621
 622static int match_uprobe(struct uprobe *l, struct uprobe *r)
 623{
 624	if (l->inode < r->inode)
 625		return -1;
 626
 627	if (l->inode > r->inode)
 628		return 1;
 629
 630	if (l->offset < r->offset)
 631		return -1;
 632
 633	if (l->offset > r->offset)
 634		return 1;
 635
 636	return 0;
 637}
 638
 639static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 640{
 641	struct uprobe u = { .inode = inode, .offset = offset };
 642	struct rb_node *n = uprobes_tree.rb_node;
 643	struct uprobe *uprobe;
 644	int match;
 645
 646	while (n) {
 647		uprobe = rb_entry(n, struct uprobe, rb_node);
 648		match = match_uprobe(&u, uprobe);
 649		if (!match)
 650			return get_uprobe(uprobe);
 651
 652		if (match < 0)
 653			n = n->rb_left;
 654		else
 655			n = n->rb_right;
 656	}
 657	return NULL;
 658}
 659
 660/*
 661 * Find a uprobe corresponding to a given inode:offset
 662 * Acquires uprobes_treelock
 663 */
 664static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 665{
 666	struct uprobe *uprobe;
 667
 668	spin_lock(&uprobes_treelock);
 669	uprobe = __find_uprobe(inode, offset);
 670	spin_unlock(&uprobes_treelock);
 671
 672	return uprobe;
 673}
 674
 675static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
 676{
 677	struct rb_node **p = &uprobes_tree.rb_node;
 678	struct rb_node *parent = NULL;
 679	struct uprobe *u;
 680	int match;
 681
 682	while (*p) {
 683		parent = *p;
 684		u = rb_entry(parent, struct uprobe, rb_node);
 685		match = match_uprobe(uprobe, u);
 686		if (!match)
 687			return get_uprobe(u);
 688
 689		if (match < 0)
 690			p = &parent->rb_left;
 691		else
 692			p = &parent->rb_right;
 693
 694	}
 695
 696	u = NULL;
 697	rb_link_node(&uprobe->rb_node, parent, p);
 698	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
 699	/* get access + creation ref */
 700	refcount_set(&uprobe->ref, 2);
 701
 702	return u;
 703}
 704
 705/*
 706 * Acquire uprobes_treelock.
 707 * Matching uprobe already exists in rbtree;
 708 *	increment (access refcount) and return the matching uprobe.
 709 *
 710 * No matching uprobe; insert the uprobe in rb_tree;
 711 *	get a double refcount (access + creation) and return NULL.
 712 */
 713static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 714{
 715	struct uprobe *u;
 716
 717	spin_lock(&uprobes_treelock);
 718	u = __insert_uprobe(uprobe);
 719	spin_unlock(&uprobes_treelock);
 720
 721	return u;
 722}
 723
 724static void
 725ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
 726{
 727	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
 728		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
 729		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
 730		(unsigned long long) cur_uprobe->ref_ctr_offset,
 731		(unsigned long long) uprobe->ref_ctr_offset);
 732}
 733
 734static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
 735				   loff_t ref_ctr_offset)
 736{
 737	struct uprobe *uprobe, *cur_uprobe;
 738
 739	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
 740	if (!uprobe)
 741		return NULL;
 742
 743	uprobe->inode = inode;
 744	uprobe->offset = offset;
 745	uprobe->ref_ctr_offset = ref_ctr_offset;
 746	init_rwsem(&uprobe->register_rwsem);
 747	init_rwsem(&uprobe->consumer_rwsem);
 748
 749	/* add to uprobes_tree, sorted on inode:offset */
 750	cur_uprobe = insert_uprobe(uprobe);
 751	/* a uprobe exists for this inode:offset combination */
 752	if (cur_uprobe) {
 753		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
 754			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
 755			put_uprobe(cur_uprobe);
 756			kfree(uprobe);
 757			return ERR_PTR(-EINVAL);
 758		}
 759		kfree(uprobe);
 760		uprobe = cur_uprobe;
 761	}
 762
 763	return uprobe;
 764}
 765
 766static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 767{
 768	down_write(&uprobe->consumer_rwsem);
 769	uc->next = uprobe->consumers;
 770	uprobe->consumers = uc;
 771	up_write(&uprobe->consumer_rwsem);
 772}
 773
 774/*
 775 * For uprobe @uprobe, delete the consumer @uc.
 776 * Return true if the @uc is deleted successfully
 777 * or return false.
 778 */
 779static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 780{
 781	struct uprobe_consumer **con;
 782	bool ret = false;
 783
 784	down_write(&uprobe->consumer_rwsem);
 785	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
 786		if (*con == uc) {
 787			*con = uc->next;
 788			ret = true;
 789			break;
 790		}
 791	}
 792	up_write(&uprobe->consumer_rwsem);
 793
 794	return ret;
 795}
 796
 797static int __copy_insn(struct address_space *mapping, struct file *filp,
 798			void *insn, int nbytes, loff_t offset)
 799{
 800	struct page *page;
 801	/*
 802	 * Ensure that the page that has the original instruction is populated
 803	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
 804	 * see uprobe_register().
 805	 */
 806	if (mapping->a_ops->readpage)
 807		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 808	else
 809		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 810	if (IS_ERR(page))
 811		return PTR_ERR(page);
 812
 813	copy_from_page(page, offset, insn, nbytes);
 814	put_page(page);
 815
 816	return 0;
 817}
 818
 819static int copy_insn(struct uprobe *uprobe, struct file *filp)
 820{
 821	struct address_space *mapping = uprobe->inode->i_mapping;
 822	loff_t offs = uprobe->offset;
 823	void *insn = &uprobe->arch.insn;
 824	int size = sizeof(uprobe->arch.insn);
 825	int len, err = -EIO;
 826
 827	/* Copy only available bytes, -EIO if nothing was read */
 828	do {
 829		if (offs >= i_size_read(uprobe->inode))
 830			break;
 831
 832		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
 833		err = __copy_insn(mapping, filp, insn, len, offs);
 834		if (err)
 835			break;
 836
 837		insn += len;
 838		offs += len;
 839		size -= len;
 840	} while (size);
 841
 842	return err;
 843}
 844
 845static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 846				struct mm_struct *mm, unsigned long vaddr)
 847{
 848	int ret = 0;
 849
 850	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
 851		return ret;
 852
 853	/* TODO: move this into _register, until then we abuse this sem. */
 854	down_write(&uprobe->consumer_rwsem);
 855	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
 856		goto out;
 857
 858	ret = copy_insn(uprobe, file);
 859	if (ret)
 860		goto out;
 861
 862	ret = -ENOTSUPP;
 863	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
 864		goto out;
 865
 866	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
 867	if (ret)
 868		goto out;
 869
 870	/* uprobe_write_opcode() assumes we don't cross page boundary */
 871	BUG_ON((uprobe->offset & ~PAGE_MASK) +
 872			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
 873
 874	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
 875	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 876
 877 out:
 878	up_write(&uprobe->consumer_rwsem);
 879
 880	return ret;
 881}
 882
 883static inline bool consumer_filter(struct uprobe_consumer *uc,
 884				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 885{
 886	return !uc->filter || uc->filter(uc, ctx, mm);
 887}
 888
 889static bool filter_chain(struct uprobe *uprobe,
 890			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 891{
 892	struct uprobe_consumer *uc;
 893	bool ret = false;
 894
 895	down_read(&uprobe->consumer_rwsem);
 896	for (uc = uprobe->consumers; uc; uc = uc->next) {
 897		ret = consumer_filter(uc, ctx, mm);
 898		if (ret)
 899			break;
 900	}
 901	up_read(&uprobe->consumer_rwsem);
 902
 903	return ret;
 904}
 905
 906static int
 907install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 908			struct vm_area_struct *vma, unsigned long vaddr)
 909{
 910	bool first_uprobe;
 911	int ret;
 912
 913	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
 914	if (ret)
 915		return ret;
 916
 917	/*
 918	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
 919	 * the task can hit this breakpoint right after __replace_page().
 920	 */
 921	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
 922	if (first_uprobe)
 923		set_bit(MMF_HAS_UPROBES, &mm->flags);
 924
 925	ret = set_swbp(&uprobe->arch, mm, vaddr);
 926	if (!ret)
 927		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
 928	else if (first_uprobe)
 929		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 930
 931	return ret;
 932}
 933
 934static int
 935remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 936{
 937	set_bit(MMF_RECALC_UPROBES, &mm->flags);
 938	return set_orig_insn(&uprobe->arch, mm, vaddr);
 939}
 940
 941static inline bool uprobe_is_active(struct uprobe *uprobe)
 942{
 943	return !RB_EMPTY_NODE(&uprobe->rb_node);
 944}
 945/*
 946 * There could be threads that have already hit the breakpoint. They
 947 * will recheck the current insn and restart if find_uprobe() fails.
 948 * See find_active_uprobe().
 949 */
 950static void delete_uprobe(struct uprobe *uprobe)
 951{
 952	if (WARN_ON(!uprobe_is_active(uprobe)))
 953		return;
 954
 955	spin_lock(&uprobes_treelock);
 956	rb_erase(&uprobe->rb_node, &uprobes_tree);
 957	spin_unlock(&uprobes_treelock);
 958	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
 959	put_uprobe(uprobe);
 960}
 961
 962struct map_info {
 963	struct map_info *next;
 964	struct mm_struct *mm;
 965	unsigned long vaddr;
 966};
 967
 968static inline struct map_info *free_map_info(struct map_info *info)
 969{
 970	struct map_info *next = info->next;
 971	kfree(info);
 972	return next;
 973}
 974
 975static struct map_info *
 976build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 977{
 978	unsigned long pgoff = offset >> PAGE_SHIFT;
 979	struct vm_area_struct *vma;
 980	struct map_info *curr = NULL;
 981	struct map_info *prev = NULL;
 982	struct map_info *info;
 983	int more = 0;
 984
 985 again:
 986	i_mmap_lock_read(mapping);
 987	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 988		if (!valid_vma(vma, is_register))
 989			continue;
 990
 991		if (!prev && !more) {
 992			/*
 993			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
 994			 * reclaim. This is optimistic, no harm done if it fails.
 995			 */
 996			prev = kmalloc(sizeof(struct map_info),
 997					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
 998			if (prev)
 999				prev->next = NULL;
1000		}
1001		if (!prev) {
1002			more++;
1003			continue;
1004		}
1005
1006		if (!mmget_not_zero(vma->vm_mm))
1007			continue;
1008
1009		info = prev;
1010		prev = prev->next;
1011		info->next = curr;
1012		curr = info;
1013
1014		info->mm = vma->vm_mm;
1015		info->vaddr = offset_to_vaddr(vma, offset);
1016	}
1017	i_mmap_unlock_read(mapping);
1018
1019	if (!more)
1020		goto out;
1021
1022	prev = curr;
1023	while (curr) {
1024		mmput(curr->mm);
1025		curr = curr->next;
1026	}
1027
1028	do {
1029		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
1030		if (!info) {
1031			curr = ERR_PTR(-ENOMEM);
1032			goto out;
1033		}
1034		info->next = prev;
1035		prev = info;
1036	} while (--more);
1037
1038	goto again;
1039 out:
1040	while (prev)
1041		prev = free_map_info(prev);
1042	return curr;
1043}
1044
1045static int
1046register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
1047{
1048	bool is_register = !!new;
1049	struct map_info *info;
1050	int err = 0;
1051
1052	percpu_down_write(&dup_mmap_sem);
1053	info = build_map_info(uprobe->inode->i_mapping,
1054					uprobe->offset, is_register);
1055	if (IS_ERR(info)) {
1056		err = PTR_ERR(info);
1057		goto out;
1058	}
1059
1060	while (info) {
1061		struct mm_struct *mm = info->mm;
1062		struct vm_area_struct *vma;
1063
1064		if (err && is_register)
1065			goto free;
1066
1067		down_write(&mm->mmap_sem);
1068		vma = find_vma(mm, info->vaddr);
1069		if (!vma || !valid_vma(vma, is_register) ||
1070		    file_inode(vma->vm_file) != uprobe->inode)
1071			goto unlock;
1072
1073		if (vma->vm_start > info->vaddr ||
1074		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
1075			goto unlock;
1076
1077		if (is_register) {
1078			/* consult only the "caller", new consumer. */
1079			if (consumer_filter(new,
1080					UPROBE_FILTER_REGISTER, mm))
1081				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
1082		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
1083			if (!filter_chain(uprobe,
1084					UPROBE_FILTER_UNREGISTER, mm))
1085				err |= remove_breakpoint(uprobe, mm, info->vaddr);
1086		}
1087
1088 unlock:
1089		up_write(&mm->mmap_sem);
1090 free:
1091		mmput(mm);
1092		info = free_map_info(info);
1093	}
1094 out:
1095	percpu_up_write(&dup_mmap_sem);
1096	return err;
1097}
1098
1099static void
1100__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
1101{
1102	int err;
1103
1104	if (WARN_ON(!consumer_del(uprobe, uc)))
1105		return;
1106
1107	err = register_for_each_vma(uprobe, NULL);
1108	/* TODO : can't unregister? schedule a worker thread */
1109	if (!uprobe->consumers && !err)
1110		delete_uprobe(uprobe);
1111}
1112
1113/*
1114 * uprobe_unregister - unregister an already registered probe.
1115 * @inode: the file in which the probe has to be removed.
1116 * @offset: offset from the start of the file.
1117 * @uc: identify which probe if multiple probes are colocated.
1118 */
1119void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
1120{
1121	struct uprobe *uprobe;
1122
1123	uprobe = find_uprobe(inode, offset);
1124	if (WARN_ON(!uprobe))
1125		return;
1126
1127	down_write(&uprobe->register_rwsem);
1128	__uprobe_unregister(uprobe, uc);
1129	up_write(&uprobe->register_rwsem);
1130	put_uprobe(uprobe);
1131}
1132EXPORT_SYMBOL_GPL(uprobe_unregister);
1133
1134/*
1135 * __uprobe_register - register a probe
1136 * @inode: the file in which the probe has to be placed.
1137 * @offset: offset from the start of the file.
1138 * @uc: information on how to handle the probe.
1139 *
1140 * Apart from the access refcount, __uprobe_register() takes a creation
1141 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
1142 * inserted into the rbtree (i.e first consumer for a @inode:@offset
1143 * tuple).  Creation refcount stops uprobe_unregister from freeing the
1144 * @uprobe even before the register operation is complete. Creation
1145 * refcount is released when the last @uc for the @uprobe
1146 * unregisters. Caller of __uprobe_register() is required to keep @inode
1147 * (and the containing mount) referenced.
1148 *
1149 * Return errno if it cannot successfully install probes
1150 * else return 0 (success)
1151 */
1152static int __uprobe_register(struct inode *inode, loff_t offset,
1153			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1154{
1155	struct uprobe *uprobe;
1156	int ret;
1157
1158	/* Uprobe must have at least one set consumer */
1159	if (!uc->handler && !uc->ret_handler)
1160		return -EINVAL;
1161
1162	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
1163	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
1164		return -EIO;
1165	/* Racy, just to catch the obvious mistakes */
1166	if (offset > i_size_read(inode))
1167		return -EINVAL;
1168
1169 retry:
1170	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
1171	if (!uprobe)
1172		return -ENOMEM;
1173	if (IS_ERR(uprobe))
1174		return PTR_ERR(uprobe);
1175
1176	/*
1177	 * We can race with uprobe_unregister()->delete_uprobe().
1178	 * Check uprobe_is_active() and retry if it is false.
1179	 */
1180	down_write(&uprobe->register_rwsem);
1181	ret = -EAGAIN;
1182	if (likely(uprobe_is_active(uprobe))) {
1183		consumer_add(uprobe, uc);
1184		ret = register_for_each_vma(uprobe, uc);
1185		if (ret)
1186			__uprobe_unregister(uprobe, uc);
1187	}
1188	up_write(&uprobe->register_rwsem);
1189	put_uprobe(uprobe);
1190
1191	if (unlikely(ret == -EAGAIN))
1192		goto retry;
1193	return ret;
1194}
1195
1196int uprobe_register(struct inode *inode, loff_t offset,
1197		    struct uprobe_consumer *uc)
1198{
1199	return __uprobe_register(inode, offset, 0, uc);
1200}
1201EXPORT_SYMBOL_GPL(uprobe_register);
1202
1203int uprobe_register_refctr(struct inode *inode, loff_t offset,
1204			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1205{
1206	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
1207}
1208EXPORT_SYMBOL_GPL(uprobe_register_refctr);
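/*
 * Minimal usage sketch for the exported registration API above, as a kernel
 * module might call it (the consumer, handler and offset are hypothetical;
 * only uprobe_register()/uprobe_unregister() are real entry points here):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;	// 0 keeps the probe; UPROBE_HANDLER_REMOVE drops it
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *	};
 *
 *	// @inode (and its containing mount) must stay referenced while
 *	// registered, see the __uprobe_register() comment above.
 *	err = uprobe_register(inode, 0x4a0, &my_consumer);
 *	...
 *	uprobe_unregister(inode, 0x4a0, &my_consumer);
 */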
1209
1210/*
1211 * uprobe_apply - add or remove the breakpoints of an already registered probe.
1212 * @inode: the file in which the probe has to be removed.
1213 * @offset: offset from the start of the file.
1214 * @uc: consumer which wants to add more or remove some breakpoints
1215 * @add: add or remove the breakpoints
1216 */
1217int uprobe_apply(struct inode *inode, loff_t offset,
1218			struct uprobe_consumer *uc, bool add)
1219{
1220	struct uprobe *uprobe;
1221	struct uprobe_consumer *con;
1222	int ret = -ENOENT;
1223
1224	uprobe = find_uprobe(inode, offset);
1225	if (WARN_ON(!uprobe))
1226		return ret;
1227
1228	down_write(&uprobe->register_rwsem);
1229	for (con = uprobe->consumers; con && con != uc ; con = con->next)
1230		;
1231	if (con)
1232		ret = register_for_each_vma(uprobe, add ? uc : NULL);
1233	up_write(&uprobe->register_rwsem);
1234	put_uprobe(uprobe);
1235
1236	return ret;
1237}
1238
1239static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1240{
1241	struct vm_area_struct *vma;
1242	int err = 0;
1243
1244	down_read(&mm->mmap_sem);
1245	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1246		unsigned long vaddr;
1247		loff_t offset;
1248
1249		if (!valid_vma(vma, false) ||
1250		    file_inode(vma->vm_file) != uprobe->inode)
1251			continue;
1252
1253		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1254		if (uprobe->offset <  offset ||
1255		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1256			continue;
1257
1258		vaddr = offset_to_vaddr(vma, uprobe->offset);
1259		err |= remove_breakpoint(uprobe, mm, vaddr);
1260	}
1261	up_read(&mm->mmap_sem);
1262
1263	return err;
1264}
1265
1266static struct rb_node *
1267find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1268{
1269	struct rb_node *n = uprobes_tree.rb_node;
1270
1271	while (n) {
1272		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1273
1274		if (inode < u->inode) {
1275			n = n->rb_left;
1276		} else if (inode > u->inode) {
1277			n = n->rb_right;
1278		} else {
1279			if (max < u->offset)
1280				n = n->rb_left;
1281			else if (min > u->offset)
1282				n = n->rb_right;
1283			else
1284				break;
1285		}
1286	}
1287
1288	return n;
1289}
1290
1291/*
1292 * For a given range in vma, build a list of probes that need to be inserted.
1293 */
1294static void build_probe_list(struct inode *inode,
1295				struct vm_area_struct *vma,
1296				unsigned long start, unsigned long end,
1297				struct list_head *head)
1298{
1299	loff_t min, max;
1300	struct rb_node *n, *t;
1301	struct uprobe *u;
1302
1303	INIT_LIST_HEAD(head);
1304	min = vaddr_to_offset(vma, start);
1305	max = min + (end - start) - 1;
1306
1307	spin_lock(&uprobes_treelock);
1308	n = find_node_in_range(inode, min, max);
1309	if (n) {
1310		for (t = n; t; t = rb_prev(t)) {
1311			u = rb_entry(t, struct uprobe, rb_node);
1312			if (u->inode != inode || u->offset < min)
1313				break;
1314			list_add(&u->pending_list, head);
1315			get_uprobe(u);
1316		}
1317		for (t = n; (t = rb_next(t)); ) {
1318			u = rb_entry(t, struct uprobe, rb_node);
1319			if (u->inode != inode || u->offset > max)
1320				break;
1321			list_add(&u->pending_list, head);
1322			get_uprobe(u);
1323		}
1324	}
1325	spin_unlock(&uprobes_treelock);
1326}
1327
1328/* @vma contains reference counter, not the probed instruction. */
1329static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
1330{
1331	struct list_head *pos, *q;
1332	struct delayed_uprobe *du;
1333	unsigned long vaddr;
1334	int ret = 0, err = 0;
1335
1336	mutex_lock(&delayed_uprobe_lock);
1337	list_for_each_safe(pos, q, &delayed_uprobe_list) {
1338		du = list_entry(pos, struct delayed_uprobe, list);
1339
1340		if (du->mm != vma->vm_mm ||
1341		    !valid_ref_ctr_vma(du->uprobe, vma))
1342			continue;
1343
1344		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
1345		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
1346		if (ret) {
1347			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
1348			if (!err)
1349				err = ret;
1350		}
1351		delayed_uprobe_delete(du);
1352	}
1353	mutex_unlock(&delayed_uprobe_lock);
1354	return err;
1355}
1356
1357/*
1358 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1359 *
1360 * Currently we ignore all errors and always return 0, the callers
1361 * can't handle the failure anyway.
1362 */
1363int uprobe_mmap(struct vm_area_struct *vma)
1364{
1365	struct list_head tmp_list;
1366	struct uprobe *uprobe, *u;
1367	struct inode *inode;
1368
1369	if (no_uprobe_events())
1370		return 0;
1371
1372	if (vma->vm_file &&
1373	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
1374	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
1375		delayed_ref_ctr_inc(vma);
1376
1377	if (!valid_vma(vma, true))
1378		return 0;
1379
1380	inode = file_inode(vma->vm_file);
1381	if (!inode)
1382		return 0;
1383
1384	mutex_lock(uprobes_mmap_hash(inode));
1385	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1386	/*
1387	 * We can race with uprobe_unregister(), this uprobe can be already
1388	 * removed. But in this case filter_chain() must return false, all
1389	 * consumers have gone away.
1390	 */
1391	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1392		if (!fatal_signal_pending(current) &&
1393		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1394			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1395			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1396		}
1397		put_uprobe(uprobe);
1398	}
1399	mutex_unlock(uprobes_mmap_hash(inode));
1400
1401	return 0;
1402}
1403
1404static bool
1405vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1406{
1407	loff_t min, max;
1408	struct inode *inode;
1409	struct rb_node *n;
1410
1411	inode = file_inode(vma->vm_file);
1412
1413	min = vaddr_to_offset(vma, start);
1414	max = min + (end - start) - 1;
1415
1416	spin_lock(&uprobes_treelock);
1417	n = find_node_in_range(inode, min, max);
1418	spin_unlock(&uprobes_treelock);
1419
1420	return !!n;
1421}
1422
1423/*
1424 * Called in context of a munmap of a vma.
1425 */
1426void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1427{
1428	if (no_uprobe_events() || !valid_vma(vma, false))
1429		return;
1430
1431	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1432		return;
1433
1434	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1435	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1436		return;
1437
1438	if (vma_has_uprobes(vma, start, end))
1439		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1440}
1441
1442/* Slot allocation for XOL */
1443static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1444{
1445	struct vm_area_struct *vma;
1446	int ret;
1447
1448	if (down_write_killable(&mm->mmap_sem))
1449		return -EINTR;
1450
1451	if (mm->uprobes_state.xol_area) {
1452		ret = -EALREADY;
1453		goto fail;
1454	}
1455
1456	if (!area->vaddr) {
1457		/* Try to map as high as possible, this is only a hint. */
1458		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1459						PAGE_SIZE, 0, 0);
1460		if (area->vaddr & ~PAGE_MASK) {
1461			ret = area->vaddr;
1462			goto fail;
1463		}
1464	}
1465
1466	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1467				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1468				&area->xol_mapping);
1469	if (IS_ERR(vma)) {
1470		ret = PTR_ERR(vma);
1471		goto fail;
1472	}
1473
1474	ret = 0;
1475	/* pairs with get_xol_area() */
1476	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1477 fail:
1478	up_write(&mm->mmap_sem);
1479
1480	return ret;
1481}
1482
1483static struct xol_area *__create_xol_area(unsigned long vaddr)
1484{
1485	struct mm_struct *mm = current->mm;
1486	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1487	struct xol_area *area;
1488
1489	area = kmalloc(sizeof(*area), GFP_KERNEL);
1490	if (unlikely(!area))
1491		goto out;
1492
1493	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
1494			       GFP_KERNEL);
1495	if (!area->bitmap)
1496		goto free_area;
1497
1498	area->xol_mapping.name = "[uprobes]";
1499	area->xol_mapping.fault = NULL;
1500	area->xol_mapping.pages = area->pages;
1501	area->pages[0] = alloc_page(GFP_HIGHUSER);
1502	if (!area->pages[0])
1503		goto free_bitmap;
1504	area->pages[1] = NULL;
1505
1506	area->vaddr = vaddr;
1507	init_waitqueue_head(&area->wq);
1508	/* Reserve the 1st slot for get_trampoline_vaddr() */
1509	set_bit(0, area->bitmap);
1510	atomic_set(&area->slot_count, 1);
1511	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1512
1513	if (!xol_add_vma(mm, area))
1514		return area;
1515
1516	__free_page(area->pages[0]);
1517 free_bitmap:
1518	kfree(area->bitmap);
1519 free_area:
1520	kfree(area);
1521 out:
1522	return NULL;
1523}
1524
1525/*
1526 * get_xol_area - Allocate process's xol_area if necessary.
1527 * This area will be used for storing instructions for execution out of line.
1528 *
1529 * Returns the allocated area or NULL.
1530 */
1531static struct xol_area *get_xol_area(void)
1532{
1533	struct mm_struct *mm = current->mm;
1534	struct xol_area *area;
1535
1536	if (!mm->uprobes_state.xol_area)
1537		__create_xol_area(0);
1538
1539	/* Pairs with xol_add_vma() smp_store_release() */
1540	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
1541	return area;
1542}
1543
1544/*
1545 * uprobe_clear_state - Free the area allocated for slots.
1546 */
1547void uprobe_clear_state(struct mm_struct *mm)
1548{
1549	struct xol_area *area = mm->uprobes_state.xol_area;
1550
1551	mutex_lock(&delayed_uprobe_lock);
1552	delayed_uprobe_remove(NULL, mm);
1553	mutex_unlock(&delayed_uprobe_lock);
1554
1555	if (!area)
1556		return;
1557
1558	put_page(area->pages[0]);
1559	kfree(area->bitmap);
1560	kfree(area);
1561}
1562
1563void uprobe_start_dup_mmap(void)
1564{
1565	percpu_down_read(&dup_mmap_sem);
1566}
1567
1568void uprobe_end_dup_mmap(void)
1569{
1570	percpu_up_read(&dup_mmap_sem);
1571}
1572
1573void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1574{
1575	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1576		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1577		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1578		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1579	}
1580}
1581
1582/*
1583 *  - search for a free slot.
1584 */
1585static unsigned long xol_take_insn_slot(struct xol_area *area)
1586{
1587	unsigned long slot_addr;
1588	int slot_nr;
1589
1590	do {
1591		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1592		if (slot_nr < UINSNS_PER_PAGE) {
1593			if (!test_and_set_bit(slot_nr, area->bitmap))
1594				break;
1595
1596			slot_nr = UINSNS_PER_PAGE;
1597			continue;
1598		}
1599		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1600	} while (slot_nr >= UINSNS_PER_PAGE);
1601
1602	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1603	atomic_inc(&area->slot_count);
1604
1605	return slot_addr;
1606}
1607
1608/*
1609 * xol_get_insn_slot - allocate a slot for xol.
1610 * Returns the allocated slot address or 0.
1611 */
1612static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1613{
1614	struct xol_area *area;
1615	unsigned long xol_vaddr;
1616
1617	area = get_xol_area();
1618	if (!area)
1619		return 0;
1620
1621	xol_vaddr = xol_take_insn_slot(area);
1622	if (unlikely(!xol_vaddr))
1623		return 0;
1624
1625	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1626			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1627
1628	return xol_vaddr;
1629}
1630
1631/*
1632 * xol_free_insn_slot - If slot was earlier allocated by
1633 * @xol_get_insn_slot(), make the slot available for
1634 * subsequent requests.
1635 */
1636static void xol_free_insn_slot(struct task_struct *tsk)
1637{
1638	struct xol_area *area;
1639	unsigned long vma_end;
1640	unsigned long slot_addr;
1641
1642	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1643		return;
1644
1645	slot_addr = tsk->utask->xol_vaddr;
1646	if (unlikely(!slot_addr))
1647		return;
1648
1649	area = tsk->mm->uprobes_state.xol_area;
1650	vma_end = area->vaddr + PAGE_SIZE;
1651	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1652		unsigned long offset;
1653		int slot_nr;
1654
1655		offset = slot_addr - area->vaddr;
1656		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1657		if (slot_nr >= UINSNS_PER_PAGE)
1658			return;
1659
1660		clear_bit(slot_nr, area->bitmap);
1661		atomic_dec(&area->slot_count);
1662		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1663		if (waitqueue_active(&area->wq))
1664			wake_up(&area->wq);
1665
1666		tsk->utask->xol_vaddr = 0;
1667	}
1668}
1669
1670void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1671				  void *src, unsigned long len)
1672{
1673	/* Initialize the slot */
1674	copy_to_page(page, vaddr, src, len);
1675
1676	/*
1677	 * We probably need flush_icache_user_range() but it needs vma.
1678	 * This should work on most of architectures by default. If
1679	 * architecture needs to do something different it can define
1680	 * its own version of the function.
1681	 */
1682	flush_dcache_page(page);
1683}
1684
1685/**
1686 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1687 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1688 * instruction.
1689 * Return the address of the breakpoint instruction.
1690 */
1691unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1692{
1693	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1694}
1695
1696unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1697{
1698	struct uprobe_task *utask = current->utask;
1699
1700	if (unlikely(utask && utask->active_uprobe))
1701		return utask->vaddr;
1702
1703	return instruction_pointer(regs);
1704}
1705
1706static struct return_instance *free_ret_instance(struct return_instance *ri)
1707{
1708	struct return_instance *next = ri->next;
1709	put_uprobe(ri->uprobe);
1710	kfree(ri);
1711	return next;
1712}
1713
1714/*
1715 * Called with no locks held.
1716 * Called in context of an exiting or an exec-ing thread.
1717 */
1718void uprobe_free_utask(struct task_struct *t)
1719{
1720	struct uprobe_task *utask = t->utask;
1721	struct return_instance *ri;
1722
1723	if (!utask)
1724		return;
1725
1726	if (utask->active_uprobe)
1727		put_uprobe(utask->active_uprobe);
1728
1729	ri = utask->return_instances;
1730	while (ri)
1731		ri = free_ret_instance(ri);
1732
1733	xol_free_insn_slot(t);
1734	kfree(utask);
1735	t->utask = NULL;
1736}
1737
1738/*
1739 * Allocate a uprobe_task object for the task if necessary.
1740 * Called when the thread hits a breakpoint.
1741 *
1742 * Returns:
1743 * - pointer to new uprobe_task on success
1744 * - NULL otherwise
1745 */
1746static struct uprobe_task *get_utask(void)
1747{
1748	if (!current->utask)
1749		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1750	return current->utask;
1751}
1752
1753static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1754{
1755	struct uprobe_task *n_utask;
1756	struct return_instance **p, *o, *n;
1757
1758	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1759	if (!n_utask)
1760		return -ENOMEM;
1761	t->utask = n_utask;
1762
1763	p = &n_utask->return_instances;
1764	for (o = o_utask->return_instances; o; o = o->next) {
1765		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1766		if (!n)
1767			return -ENOMEM;
1768
1769		*n = *o;
1770		get_uprobe(n->uprobe);
1771		n->next = NULL;
1772
1773		*p = n;
1774		p = &n->next;
1775		n_utask->depth++;
1776	}
1777
1778	return 0;
1779}
1780
1781static void uprobe_warn(struct task_struct *t, const char *msg)
1782{
1783	pr_warn("uprobe: %s:%d failed to %s\n",
1784			current->comm, current->pid, msg);
1785}
1786
1787static void dup_xol_work(struct callback_head *work)
1788{
1789	if (current->flags & PF_EXITING)
1790		return;
1791
1792	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1793			!fatal_signal_pending(current))
1794		uprobe_warn(current, "dup xol area");
1795}
1796
1797/*
1798 * Called in context of a new clone/fork from copy_process.
1799 */
1800void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1801{
1802	struct uprobe_task *utask = current->utask;
1803	struct mm_struct *mm = current->mm;
1804	struct xol_area *area;
1805
1806	t->utask = NULL;
1807
1808	if (!utask || !utask->return_instances)
1809		return;
1810
1811	if (mm == t->mm && !(flags & CLONE_VFORK))
1812		return;
1813
1814	if (dup_utask(t, utask))
1815		return uprobe_warn(t, "dup ret instances");
1816
1817	/* The task can fork() after dup_xol_work() fails */
1818	area = mm->uprobes_state.xol_area;
1819	if (!area)
1820		return uprobe_warn(t, "dup xol area");
1821
1822	if (mm == t->mm)
1823		return;
1824
1825	t->utask->dup_xol_addr = area->vaddr;
1826	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1827	task_work_add(t, &t->utask->dup_xol_work, true);
1828}
1829
1830/*
1831 * The current area->vaddr notion assumes the trampoline address is always
1832 * equal to area->vaddr.
1833 *
1834 * Returns -1 in case the xol_area is not allocated.
1835 */
1836static unsigned long get_trampoline_vaddr(void)
1837{
1838	struct xol_area *area;
1839	unsigned long trampoline_vaddr = -1;
1840
1841	/* Pairs with xol_add_vma() smp_store_release() */
1842	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1843	if (area)
1844		trampoline_vaddr = area->vaddr;
1845
1846	return trampoline_vaddr;
1847}
1848
1849static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1850					struct pt_regs *regs)
1851{
1852	struct return_instance *ri = utask->return_instances;
1853	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1854
1855	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1856		ri = free_ret_instance(ri);
1857		utask->depth--;
1858	}
1859	utask->return_instances = ri;
1860}
1861
1862static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1863{
1864	struct return_instance *ri;
1865	struct uprobe_task *utask;
1866	unsigned long orig_ret_vaddr, trampoline_vaddr;
1867	bool chained;
1868
1869	if (!get_xol_area())
1870		return;
1871
1872	utask = get_utask();
1873	if (!utask)
1874		return;
1875
1876	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1877		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1878				" nestedness limit pid/tgid=%d/%d\n",
1879				current->pid, current->tgid);
1880		return;
1881	}
1882
1883	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1884	if (!ri)
1885		return;
1886
1887	trampoline_vaddr = get_trampoline_vaddr();
1888	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1889	if (orig_ret_vaddr == -1)
1890		goto fail;
1891
1892	/* drop the entries invalidated by longjmp() */
1893	chained = (orig_ret_vaddr == trampoline_vaddr);
1894	cleanup_return_instances(utask, chained, regs);
1895
1896	/*
1897	 * We don't want to keep trampoline address in stack, rather keep the
1898 * original return address of the first caller through all the subsequent
1899	 * instances. This also makes breakpoint unwrapping easier.
1900	 */
1901	if (chained) {
1902		if (!utask->return_instances) {
1903			/*
1904			 * This situation is not possible. Likely we have an
1905			 * attack from user-space.
1906			 */
1907			uprobe_warn(current, "handle tail call");
1908			goto fail;
1909		}
1910		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1911	}
1912
1913	ri->uprobe = get_uprobe(uprobe);
1914	ri->func = instruction_pointer(regs);
1915	ri->stack = user_stack_pointer(regs);
1916	ri->orig_ret_vaddr = orig_ret_vaddr;
1917	ri->chained = chained;
1918
1919	utask->depth++;
1920	ri->next = utask->return_instances;
1921	utask->return_instances = ri;
1922
1923	return;
1924 fail:
1925	kfree(ri);
1926}
1927
1928/* Prepare to single-step probed instruction out of line. */
1929static int
1930pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1931{
1932	struct uprobe_task *utask;
1933	unsigned long xol_vaddr;
1934	int err;
1935
1936	utask = get_utask();
1937	if (!utask)
1938		return -ENOMEM;
1939
1940	xol_vaddr = xol_get_insn_slot(uprobe);
1941	if (!xol_vaddr)
1942		return -ENOMEM;
1943
1944	utask->xol_vaddr = xol_vaddr;
1945	utask->vaddr = bp_vaddr;
1946
1947	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1948	if (unlikely(err)) {
1949		xol_free_insn_slot(current);
1950		return err;
1951	}
1952
1953	utask->active_uprobe = uprobe;
1954	utask->state = UTASK_SSTEP;
1955	return 0;
1956}
1957
1958/*
1959 * If we are singlestepping, then ensure this thread is not connected to
1960 * non-fatal signals until completion of singlestep.  When xol insn itself
1961 * triggers the signal,  restart the original insn even if the task is
1962 * already SIGKILL'ed (since coredump should report the correct ip).  This
1963 * is even more important if the task has a handler for SIGSEGV/etc, The
1964 * _same_ instruction should be repeated again after return from the signal
1965 * handler, and SSTEP can never finish in this case.
1966 */
1967bool uprobe_deny_signal(void)
1968{
1969	struct task_struct *t = current;
1970	struct uprobe_task *utask = t->utask;
1971
1972	if (likely(!utask || !utask->active_uprobe))
1973		return false;
1974
1975	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1976
1977	if (signal_pending(t)) {
1978		spin_lock_irq(&t->sighand->siglock);
1979		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1980		spin_unlock_irq(&t->sighand->siglock);
1981
1982		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1983			utask->state = UTASK_SSTEP_TRAPPED;
1984			set_tsk_thread_flag(t, TIF_UPROBE);
1985		}
1986	}
1987
1988	return true;
1989}
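/*
 * Editor's cross-reference (not part of the original source): the
 * TIF_SIGPENDING flag cleared above is re-established by the
 * recalc_sigpending() call in handle_singlestep() once the single-step
 * completes or is aborted, so deferred signals are only delayed, not lost.
 */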
1990
1991static void mmf_recalc_uprobes(struct mm_struct *mm)
1992{
1993	struct vm_area_struct *vma;
1994
1995	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1996		if (!valid_vma(vma, false))
1997			continue;
1998		/*
1999		 * This is not strictly accurate, we can race with
2000		 * uprobe_unregister() and see the already removed
2001		 * uprobe if delete_uprobe() was not yet called.
2002		 * Or this uprobe can be filtered out.
2003		 */
2004		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2005			return;
2006	}
2007
2008	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2009}
2010
2011static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2012{
2013	struct page *page;
2014	uprobe_opcode_t opcode;
2015	int result;
2016
2017	pagefault_disable();
2018	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2019	pagefault_enable();
2020
2021	if (likely(result == 0))
2022		goto out;
2023
2024	/*
2025	 * The NULL 'tsk' here ensures that any faults that occur here
2026	 * will not be accounted to the task.  'mm' *is* current->mm,
2027	 * but we treat this as a 'remote' access since it is
2028	 * essentially a kernel access to the memory.
2029	 */
2030	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
2031			NULL, NULL);
2032	if (result < 0)
2033		return result;
2034
2035	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2036	put_page(page);
2037 out:
2038	/* This needs to return true for any variant of the trap insn */
2039	return is_trap_insn(&opcode);
2040}
2041
2042static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
2043{
2044	struct mm_struct *mm = current->mm;
2045	struct uprobe *uprobe = NULL;
2046	struct vm_area_struct *vma;
2047
2048	down_read(&mm->mmap_sem);
2049	vma = find_vma(mm, bp_vaddr);
2050	if (vma && vma->vm_start <= bp_vaddr) {
2051		if (valid_vma(vma, false)) {
2052			struct inode *inode = file_inode(vma->vm_file);
2053			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2054
2055			uprobe = find_uprobe(inode, offset);
2056		}
2057
2058		if (!uprobe)
2059			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2060	} else {
2061		*is_swbp = -EFAULT;
2062	}
2063
2064	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2065		mmf_recalc_uprobes(mm);
2066	up_read(&mm->mmap_sem);
2067
2068	return uprobe;
2069}
2070
2071static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2072{
2073	struct uprobe_consumer *uc;
2074	int remove = UPROBE_HANDLER_REMOVE;
2075	bool need_prep = false; /* prepare return uprobe, when needed */
2076
2077	down_read(&uprobe->register_rwsem);
2078	for (uc = uprobe->consumers; uc; uc = uc->next) {
2079		int rc = 0;
2080
2081		if (uc->handler) {
2082			rc = uc->handler(uc, regs);
2083			WARN(rc & ~UPROBE_HANDLER_MASK,
2084				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2085		}
2086
2087		if (uc->ret_handler)
2088			need_prep = true;
2089
2090		remove &= rc;
2091	}
2092
2093	if (need_prep && !remove)
2094		prepare_uretprobe(uprobe, regs); /* put bp at return */
2095
2096	if (remove && uprobe->consumers) {
2097		WARN_ON(!uprobe_is_active(uprobe));
2098		unapply_uprobe(uprobe, current->mm);
2099	}
2100	up_read(&uprobe->register_rwsem);
2101}
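/*
 * Editor's illustration (not part of the original source): a minimal
 * consumer showing the return-value convention consumed by handler_chain()
 * above.  Returning 0 keeps the breakpoint; UPROBE_HANDLER_REMOVE asks for
 * unapply_uprobe() on current->mm once every consumer agrees.  Providing a
 * non-NULL ->ret_handler is what makes handler_chain() arm the return probe
 * via prepare_uretprobe().  The "example_hits" counter is hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
static atomic_t example_hits = ATOMIC_INIT(0);

static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	if (atomic_inc_return(&example_hits) < 100)
		return 0;			/* keep the breakpoint */

	return UPROBE_HANDLER_REMOVE;		/* enough samples, drop it */
}

static struct uprobe_consumer example_consumer = {
	.handler	= example_handler,
};
#endif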
2102
2103static void
2104handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2105{
2106	struct uprobe *uprobe = ri->uprobe;
2107	struct uprobe_consumer *uc;
2108
2109	down_read(&uprobe->register_rwsem);
2110	for (uc = uprobe->consumers; uc; uc = uc->next) {
2111		if (uc->ret_handler)
2112			uc->ret_handler(uc, ri->func, regs);
2113	}
2114	up_read(&uprobe->register_rwsem);
2115}
2116
2117static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2118{
2119	bool chained;
2120
2121	do {
2122		chained = ri->chained;
2123		ri = ri->next;	/* can't be NULL if chained */
2124	} while (chained);
2125
2126	return ri;
2127}
2128
2129static void handle_trampoline(struct pt_regs *regs)
2130{
2131	struct uprobe_task *utask;
2132	struct return_instance *ri, *next;
2133	bool valid;
2134
2135	utask = current->utask;
2136	if (!utask)
2137		goto sigill;
2138
2139	ri = utask->return_instances;
2140	if (!ri)
2141		goto sigill;
2142
2143	do {
2144		/*
2145		 * We should throw out the frames invalidated by longjmp().
2146		 * If this chain is valid, then the next one should be alive
2147		 * or NULL; the latter case means that nobody but ri->func
2148		 * could hit this trampoline on return. TODO: sigaltstack().
2149		 */
2150		next = find_next_ret_chain(ri);
2151		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
2152
2153		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2154		do {
2155			if (valid)
2156				handle_uretprobe_chain(ri, regs);
2157			ri = free_ret_instance(ri);
2158			utask->depth--;
2159		} while (ri != next);
2160	} while (!valid);
2161
2162	utask->return_instances = ri;
2163	return;
2164
2165 sigill:
2166	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
2167	force_sig(SIGILL);
2168
2169}
2170
2171bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2172{
2173	return false;
2174}
2175
2176bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2177					struct pt_regs *regs)
2178{
2179	return true;
2180}
2181
2182/*
2183 * Run handler and ask thread to singlestep.
2184 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2185 */
2186static void handle_swbp(struct pt_regs *regs)
2187{
2188	struct uprobe *uprobe;
2189	unsigned long bp_vaddr;
2190	int uninitialized_var(is_swbp);
2191
2192	bp_vaddr = uprobe_get_swbp_addr(regs);
2193	if (bp_vaddr == get_trampoline_vaddr())
2194		return handle_trampoline(regs);
2195
2196	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
2197	if (!uprobe) {
2198		if (is_swbp > 0) {
2199			/* No matching uprobe; signal SIGTRAP. */
2200			send_sig(SIGTRAP, current, 0);
2201		} else {
2202			/*
2203			 * Either we raced with uprobe_unregister() or we can't
2204			 * access this memory. The latter is only possible if
2205			 * another thread plays with our ->mm. In both cases
2206			 * we can simply restart. If this vma was unmapped we
2207			 * can pretend this insn was not executed yet and get
2208			 * the (correct) SIGSEGV after restart.
2209			 */
2210			instruction_pointer_set(regs, bp_vaddr);
2211		}
2212		return;
2213	}
2214
2215	/* change it in advance for ->handler() and restart */
2216	instruction_pointer_set(regs, bp_vaddr);
2217
2218	/*
2219	 * TODO: move copy_insn/etc into _register and remove this hack.
2220	 * After we hit the bp, _unregister + _register can install the
2221	 * new and not-yet-analyzed uprobe at the same address, restart.
2222	 */
2223	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2224		goto out;
2225
2226	/*
2227	 * Pairs with the smp_wmb() in prepare_uprobe().
2228	 *
2229	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2230	 * we must also see the stores to &uprobe->arch performed by the
2231	 * prepare_uprobe() call.
2232	 */
2233	smp_rmb();
2234
2235	/* Tracing handlers use ->utask to communicate with fetch methods */
2236	if (!get_utask())
2237		goto out;
2238
2239	if (arch_uprobe_ignore(&uprobe->arch, regs))
2240		goto out;
2241
2242	handler_chain(uprobe, regs);
2243
2244	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2245		goto out;
2246
2247	if (!pre_ssout(uprobe, regs, bp_vaddr))
2248		return;
2249
2250	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
2251out:
2252	put_uprobe(uprobe);
2253}
2254
2255/*
2256 * Perform required fix-ups and disable singlestep.
2257 * Allow pending signals to take effect.
2258 */
2259static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2260{
2261	struct uprobe *uprobe;
2262	int err = 0;
2263
2264	uprobe = utask->active_uprobe;
2265	if (utask->state == UTASK_SSTEP_ACK)
2266		err = arch_uprobe_post_xol(&uprobe->arch, regs);
2267	else if (utask->state == UTASK_SSTEP_TRAPPED)
2268		arch_uprobe_abort_xol(&uprobe->arch, regs);
2269	else
2270		WARN_ON_ONCE(1);
2271
2272	put_uprobe(uprobe);
2273	utask->active_uprobe = NULL;
2274	utask->state = UTASK_RUNNING;
2275	xol_free_insn_slot(current);
2276
2277	spin_lock_irq(&current->sighand->siglock);
2278	recalc_sigpending(); /* see uprobe_deny_signal() */
2279	spin_unlock_irq(&current->sighand->siglock);
2280
2281	if (unlikely(err)) {
2282		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2283		force_sig(SIGILL);
2284	}
2285}
2286
2287/*
2288 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
2289 * allows the thread to return from interrupt. After that handle_swbp()
2290 * sets utask->active_uprobe.
2291 *
2292 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
2293 * and allows the thread to return from interrupt.
2294 *
2295 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
2296 * uprobe_notify_resume().
2297 */
2298void uprobe_notify_resume(struct pt_regs *regs)
2299{
2300	struct uprobe_task *utask;
2301
2302	clear_thread_flag(TIF_UPROBE);
2303
2304	utask = current->utask;
2305	if (utask && utask->active_uprobe)
2306		handle_singlestep(utask, regs);
2307	else
2308		handle_swbp(regs);
2309}
2310
2311/*
2312 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
2313 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
2314 */
2315int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2316{
2317	if (!current->mm)
2318		return 0;
2319
2320	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2321	    (!current->utask || !current->utask->return_instances))
2322		return 0;
2323
2324	set_thread_flag(TIF_UPROBE);
2325	return 1;
2326}
2327
2328/*
2329 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
2330 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
2331 */
2332int uprobe_post_sstep_notifier(struct pt_regs *regs)
2333{
2334	struct uprobe_task *utask = current->utask;
2335
2336	if (!current->mm || !utask || !utask->active_uprobe)
2337		/* task is currently not uprobed */
2338		return 0;
2339
2340	utask->state = UTASK_SSTEP_ACK;
2341	set_thread_flag(TIF_UPROBE);
2342	return 1;
2343}
2344
2345static struct notifier_block uprobe_exception_nb = {
2346	.notifier_call		= arch_uprobe_exception_notify,
2347	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2348};
2349
2350void __init uprobes_init(void)
2351{
2352	int i;
2353
2354	for (i = 0; i < UPROBES_HASH_SZ; i++)
2355		mutex_init(&uprobes_mmap_mutex[i]);
2356
2357	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2358}
v6.8
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * User-space Probes (UProbes)
   4 *
   5 * Copyright (C) IBM Corporation, 2008-2012
   6 * Authors:
   7 *	Srikar Dronamraju
   8 *	Jim Keniston
   9 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/highmem.h>
  14#include <linux/pagemap.h>	/* read_mapping_page */
  15#include <linux/slab.h>
  16#include <linux/sched.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/coredump.h>
  19#include <linux/export.h>
  20#include <linux/rmap.h>		/* anon_vma_prepare */
  21#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
  22#include <linux/swap.h>		/* folio_free_swap */
  23#include <linux/ptrace.h>	/* user_enable_single_step */
  24#include <linux/kdebug.h>	/* notifier mechanism */
  25#include <linux/percpu-rwsem.h>
  26#include <linux/task_work.h>
  27#include <linux/shmem_fs.h>
  28#include <linux/khugepaged.h>
  29
  30#include <linux/uprobes.h>
  31
  32#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
  33#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
  34
  35static struct rb_root uprobes_tree = RB_ROOT;
  36/*
  37 * allows us to skip the uprobe_mmap if there are no uprobe events active
  38 * at this time.  Probably a fine grained per inode count is better?
  39 */
  40#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
  41
  42static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
  43
  44#define UPROBES_HASH_SZ	13
  45/* serialize uprobe->pending_list */
  46static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  47#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
  48
  49DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
  50
  51/* Have a copy of original instruction */
  52#define UPROBE_COPY_INSN	0
  53
  54struct uprobe {
  55	struct rb_node		rb_node;	/* node in the rb tree */
  56	refcount_t		ref;
  57	struct rw_semaphore	register_rwsem;
  58	struct rw_semaphore	consumer_rwsem;
  59	struct list_head	pending_list;
  60	struct uprobe_consumer	*consumers;
  61	struct inode		*inode;		/* Also hold a ref to inode */
  62	loff_t			offset;
  63	loff_t			ref_ctr_offset;
  64	unsigned long		flags;
  65
  66	/*
  67	 * The generic code assumes that it has two members of unknown type
  68	 * owned by the arch-specific code:
  69	 *
  70	 * 	insn -	copy_insn() saves the original instruction here for
  71	 *		arch_uprobe_analyze_insn().
  72	 *
  73	 *	ixol -	potentially modified instruction to execute out of
  74	 *		line, copied to xol_area by xol_get_insn_slot().
  75	 */
  76	struct arch_uprobe	arch;
  77};
  78
  79struct delayed_uprobe {
  80	struct list_head list;
  81	struct uprobe *uprobe;
  82	struct mm_struct *mm;
  83};
  84
  85static DEFINE_MUTEX(delayed_uprobe_lock);
  86static LIST_HEAD(delayed_uprobe_list);
  87
  88/*
  89 * Execute out of line area: anonymous executable mapping installed
  90 * by the probed task to execute the copy of the original instruction
  91 * mangled by set_swbp().
  92 *
  93 * On a breakpoint hit, thread contests for a slot.  It frees the
  94 * slot after singlestep. Currently a fixed number of slots are
  95 * allocated.
  96 */
  97struct xol_area {
  98	wait_queue_head_t 		wq;		/* if all slots are busy */
  99	atomic_t 			slot_count;	/* number of in-use slots */
 100	unsigned long 			*bitmap;	/* 0 = free slot */
 101
 102	struct vm_special_mapping	xol_mapping;
 103	struct page 			*pages[2];
 104	/*
 105	 * We keep the vma's vm_start rather than a pointer to the vma
 106	 * itself.  The probed process or a naughty kernel module could make
 107	 * the vma go away, and we must handle that reasonably gracefully.
 108	 */
 109	unsigned long 			vaddr;		/* Page(s) of instruction slots */
 110};
 111
 112/*
 113 * valid_vma: Verify if the specified vma is an executable vma
 114 * Relax restrictions while unregistering: vm_flags might have
 115 * changed after breakpoint was inserted.
 116 *	- is_register: indicates if we are in register context.
 117 *	- Return 1 if the specified virtual address is in an
 118 *	  executable vma.
 119 */
 120static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 121{
 122	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
 123
 124	if (is_register)
 125		flags |= VM_WRITE;
 126
 127	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
 128}
 129
 130static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
 131{
 132	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 133}
 134
 135static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
 136{
 137	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
 138}
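/*
 * Editor's note (illustrative, hypothetical numbers, assuming 4K pages):
 * with vm_start == 0x400000 and vm_pgoff == 2 (the vma maps the file from
 * offset 0x2000), file offset 0x2010 maps to the virtual address
 * 0x400000 + 0x2010 - 0x2000 == 0x400010, and vaddr_to_offset() inverts
 * that: 0x2000 + (0x400010 - 0x400000) == 0x2010.
 */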
 139
 140/**
 141 * __replace_page - replace page in vma by new page.
 142 * based on replace_page in mm/ksm.c
 143 *
 144 * @vma:      vma that holds the pte pointing to page
 145 * @addr:     address the old @page is mapped at
 146 * @old_page: the page we are replacing by new_page
 147 * @new_page: the modified page we replace page by
 148 *
 149 * If @new_page is NULL, only unmap @old_page.
 150 *
 151 * Returns 0 on success, negative error code otherwise.
 152 */
 153static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 154				struct page *old_page, struct page *new_page)
 155{
 156	struct folio *old_folio = page_folio(old_page);
 157	struct folio *new_folio;
 158	struct mm_struct *mm = vma->vm_mm;
 159	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
 160	int err;
 161	struct mmu_notifier_range range;
 162
 163	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
 164				addr + PAGE_SIZE);
 165
 166	if (new_page) {
 167		new_folio = page_folio(new_page);
 168		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
 169		if (err)
 170			return err;
 171	}
 172
 173	/* For folio_free_swap() below */
 174	folio_lock(old_folio);
 175
 176	mmu_notifier_invalidate_range_start(&range);
 177	err = -EAGAIN;
 178	if (!page_vma_mapped_walk(&pvmw))
 179		goto unlock;
 180	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 181
 182	if (new_page) {
 183		folio_get(new_folio);
 184		folio_add_new_anon_rmap(new_folio, vma, addr);
 185		folio_add_lru_vma(new_folio, vma);
 186	} else
 187		/* no new page, just dec_mm_counter for old_page */
 188		dec_mm_counter(mm, MM_ANONPAGES);
 189
 190	if (!folio_test_anon(old_folio)) {
 191		dec_mm_counter(mm, mm_counter_file(old_page));
 192		inc_mm_counter(mm, MM_ANONPAGES);
 193	}
 194
 195	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
 196	ptep_clear_flush(vma, addr, pvmw.pte);
 197	if (new_page)
 198		set_pte_at_notify(mm, addr, pvmw.pte,
 199				  mk_pte(new_page, vma->vm_page_prot));
 200
 201	folio_remove_rmap_pte(old_folio, old_page, vma);
 202	if (!folio_mapped(old_folio))
 203		folio_free_swap(old_folio);
 204	page_vma_mapped_walk_done(&pvmw);
 205	folio_put(old_folio);
 206
 207	err = 0;
 208 unlock:
 209	mmu_notifier_invalidate_range_end(&range);
 210	folio_unlock(old_folio);
 211	return err;
 212}
 213
 214/**
 215 * is_swbp_insn - check if instruction is breakpoint instruction.
 216 * @insn: instruction to be checked.
 217 * Default implementation of is_swbp_insn
 218 * Returns true if @insn is a breakpoint instruction.
 219 */
 220bool __weak is_swbp_insn(uprobe_opcode_t *insn)
 221{
 222	return *insn == UPROBE_SWBP_INSN;
 223}
 224
 225/**
 226 * is_trap_insn - check if instruction is breakpoint instruction.
 227 * @insn: instruction to be checked.
 228 * Default implementation of is_trap_insn
 229 * Returns true if @insn is a breakpoint instruction.
 230 *
 231 * This function is needed for the case where an architecture has multiple
 232 * trap instructions (like powerpc).
 233 */
 234bool __weak is_trap_insn(uprobe_opcode_t *insn)
 235{
 236	return is_swbp_insn(insn);
 237}
 238
 239static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
 240{
 241	void *kaddr = kmap_atomic(page);
 242	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
 243	kunmap_atomic(kaddr);
 244}
 245
 246static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
 247{
 248	void *kaddr = kmap_atomic(page);
 249	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
 250	kunmap_atomic(kaddr);
 251}
 252
 253static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
 254{
 255	uprobe_opcode_t old_opcode;
 256	bool is_swbp;
 257
 258	/*
 259	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
 260	 * We do not check if it is any other 'trap variant' which could
 261	 * be conditional trap instruction such as the one powerpc supports.
 262	 *
 263	 * The logic is that we do not care if the underlying instruction
 264	 * is a trap variant; uprobes always wins over any other (gdb)
 265	 * breakpoint.
 266	 */
 267	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
 268	is_swbp = is_swbp_insn(&old_opcode);
 269
 270	if (is_swbp_insn(new_opcode)) {
 271		if (is_swbp)		/* register: already installed? */
 272			return 0;
 273	} else {
 274		if (!is_swbp)		/* unregister: was it changed by us? */
 275			return 0;
 276	}
 277
 278	return 1;
 279}
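/*
 * Editor's note (not part of the original source): the result above is
 * consumed by uprobe_write_opcode() as a tri-state: 1 means "go ahead and
 * write", 0 means "the page is already in the desired state, nothing to
 * do", and a negative value would be an error (none is produced here).
 */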
 280
 281static struct delayed_uprobe *
 282delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
 283{
 284	struct delayed_uprobe *du;
 285
 286	list_for_each_entry(du, &delayed_uprobe_list, list)
 287		if (du->uprobe == uprobe && du->mm == mm)
 288			return du;
 289	return NULL;
 290}
 291
 292static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
 293{
 294	struct delayed_uprobe *du;
 295
 296	if (delayed_uprobe_check(uprobe, mm))
 297		return 0;
 298
 299	du  = kzalloc(sizeof(*du), GFP_KERNEL);
 300	if (!du)
 301		return -ENOMEM;
 302
 303	du->uprobe = uprobe;
 304	du->mm = mm;
 305	list_add(&du->list, &delayed_uprobe_list);
 306	return 0;
 307}
 308
 309static void delayed_uprobe_delete(struct delayed_uprobe *du)
 310{
 311	if (WARN_ON(!du))
 312		return;
 313	list_del(&du->list);
 314	kfree(du);
 315}
 316
 317static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
 318{
 319	struct list_head *pos, *q;
 320	struct delayed_uprobe *du;
 321
 322	if (!uprobe && !mm)
 323		return;
 324
 325	list_for_each_safe(pos, q, &delayed_uprobe_list) {
 326		du = list_entry(pos, struct delayed_uprobe, list);
 327
 328		if (uprobe && du->uprobe != uprobe)
 329			continue;
 330		if (mm && du->mm != mm)
 331			continue;
 332
 333		delayed_uprobe_delete(du);
 334	}
 335}
 336
 337static bool valid_ref_ctr_vma(struct uprobe *uprobe,
 338			      struct vm_area_struct *vma)
 339{
 340	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
 341
 342	return uprobe->ref_ctr_offset &&
 343		vma->vm_file &&
 344		file_inode(vma->vm_file) == uprobe->inode &&
 345		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
 346		vma->vm_start <= vaddr &&
 347		vma->vm_end > vaddr;
 348}
 349
 350static struct vm_area_struct *
 351find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
 352{
 353	VMA_ITERATOR(vmi, mm, 0);
 354	struct vm_area_struct *tmp;
 355
 356	for_each_vma(vmi, tmp)
 357		if (valid_ref_ctr_vma(uprobe, tmp))
 358			return tmp;
 359
 360	return NULL;
 361}
 362
 363static int
 364__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
 365{
 366	void *kaddr;
 367	struct page *page;
 368	int ret;
 369	short *ptr;
 370
 371	if (!vaddr || !d)
 372		return -EINVAL;
 373
 374	ret = get_user_pages_remote(mm, vaddr, 1,
 375				    FOLL_WRITE, &page, NULL);
 376	if (unlikely(ret <= 0)) {
 377		/*
 378		 * We are asking for 1 page. If get_user_pages_remote() fails,
 379		 * it may return 0, in that case we have to return error.
 380		 */
 381		return ret == 0 ? -EBUSY : ret;
 382	}
 383
 384	kaddr = kmap_atomic(page);
 385	ptr = kaddr + (vaddr & ~PAGE_MASK);
 386
 387	if (unlikely(*ptr + d < 0)) {
 388		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
 389			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
 390		ret = -EINVAL;
 391		goto out;
 392	}
 393
 394	*ptr += d;
 395	ret = 0;
 396out:
 397	kunmap_atomic(kaddr);
 398	put_page(page);
 399	return ret;
 400}
 401
 402static void update_ref_ctr_warn(struct uprobe *uprobe,
 403				struct mm_struct *mm, short d)
 404{
 405	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
 406		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
 407		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
 408		(unsigned long long) uprobe->offset,
 409		(unsigned long long) uprobe->ref_ctr_offset, mm);
 410}
 411
 412static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
 413			  short d)
 414{
 415	struct vm_area_struct *rc_vma;
 416	unsigned long rc_vaddr;
 417	int ret = 0;
 418
 419	rc_vma = find_ref_ctr_vma(uprobe, mm);
 420
 421	if (rc_vma) {
 422		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
 423		ret = __update_ref_ctr(mm, rc_vaddr, d);
 424		if (ret)
 425			update_ref_ctr_warn(uprobe, mm, d);
 426
 427		if (d > 0)
 428			return ret;
 429	}
 430
 431	mutex_lock(&delayed_uprobe_lock);
 432	if (d > 0)
 433		ret = delayed_uprobe_add(uprobe, mm);
 434	else
 435		delayed_uprobe_remove(uprobe, mm);
 436	mutex_unlock(&delayed_uprobe_lock);
 437
 438	return ret;
 439}
 440
 441/*
 442 * NOTE:
 443 * Expect the breakpoint instruction to be the smallest-size instruction for
 444 * the architecture. If an arch has variable-length instructions and the
 445 * breakpoint instruction is not the smallest-length instruction
 446 * supported by that architecture, then we need to modify is_trap_at_addr and
 447 * uprobe_write_opcode accordingly. This would never be a problem for archs
 448 * that have fixed-length instructions.
 449 *
 450 * uprobe_write_opcode - write the opcode at a given virtual address.
 451 * @auprobe: arch specific probepoint information.
 452 * @mm: the probed process address space.
 453 * @vaddr: the virtual address to store the opcode.
 454 * @opcode: opcode to be written at @vaddr.
 455 *
 456 * Called with mm->mmap_lock held for write.
 457 * Return 0 (success) or a negative errno.
 458 */
 459int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 460			unsigned long vaddr, uprobe_opcode_t opcode)
 461{
 462	struct uprobe *uprobe;
 463	struct page *old_page, *new_page;
 464	struct vm_area_struct *vma;
 465	int ret, is_register, ref_ctr_updated = 0;
 466	bool orig_page_huge = false;
 467	unsigned int gup_flags = FOLL_FORCE;
 468
 469	is_register = is_swbp_insn(&opcode);
 470	uprobe = container_of(auprobe, struct uprobe, arch);
 471
 472retry:
 473	if (is_register)
 474		gup_flags |= FOLL_SPLIT_PMD;
 475	/* Read the page with vaddr into memory */
 476	old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
 477	if (IS_ERR(old_page))
 478		return PTR_ERR(old_page);
 479
 480	ret = verify_opcode(old_page, vaddr, &opcode);
 481	if (ret <= 0)
 482		goto put_old;
 483
 484	if (WARN(!is_register && PageCompound(old_page),
 485		 "uprobe unregister should never work on compound page\n")) {
 486		ret = -EINVAL;
 487		goto put_old;
 488	}
 489
 490	/* We are going to replace instruction, update ref_ctr. */
 491	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
 492		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
 493		if (ret)
 494			goto put_old;
 495
 496		ref_ctr_updated = 1;
 497	}
 498
 499	ret = 0;
 500	if (!is_register && !PageAnon(old_page))
 501		goto put_old;
 502
 503	ret = anon_vma_prepare(vma);
 504	if (ret)
 505		goto put_old;
 506
 507	ret = -ENOMEM;
 508	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 509	if (!new_page)
 510		goto put_old;
 511
 512	__SetPageUptodate(new_page);
 513	copy_highpage(new_page, old_page);
 514	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 515
 516	if (!is_register) {
 517		struct page *orig_page;
 518		pgoff_t index;
 519
 520		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
 521
 522		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
 523		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
 524					  index);
 525
 526		if (orig_page) {
 527			if (PageUptodate(orig_page) &&
 528			    pages_identical(new_page, orig_page)) {
 529				/* let go new_page */
 530				put_page(new_page);
 531				new_page = NULL;
 532
 533				if (PageCompound(orig_page))
 534					orig_page_huge = true;
 535			}
 536			put_page(orig_page);
 537		}
 538	}
 539
 540	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
 541	if (new_page)
 542		put_page(new_page);
 543put_old:
 544	put_page(old_page);
 545
 546	if (unlikely(ret == -EAGAIN))
 547		goto retry;
 548
 549	/* Revert back reference counter if instruction update failed. */
 550	if (ret && is_register && ref_ctr_updated)
 551		update_ref_ctr(uprobe, mm, -1);
 552
 553	/* try collapse pmd for compound page */
 554	if (!ret && orig_page_huge)
 555		collapse_pte_mapped_thp(mm, vaddr, false);
 556
 557	return ret;
 558}
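/*
 * Editor's summary (not part of the original source): the function above
 * never writes to the mapped file itself.  It pins the page backing @vaddr,
 * asks verify_opcode() whether there is anything to do, updates the
 * reference counter if one was requested, builds an anonymous copy of the
 * page with @opcode patched in, and swaps that copy in via __replace_page().
 * On unregister, if the patched copy turns out to be identical to the
 * original page-cache page, the copy is discarded and the old anonymous page
 * is simply unmapped so the file page can be faulted back in.  -EAGAIN from
 * __replace_page() triggers a retry.
 */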
 559
 560/**
 561 * set_swbp - store breakpoint at a given address.
 562 * @auprobe: arch specific probepoint information.
 563 * @mm: the probed process address space.
 564 * @vaddr: the virtual address to insert the opcode.
 565 *
 566 * For mm @mm, store the breakpoint instruction at @vaddr.
 567 * Return 0 (success) or a negative errno.
 568 */
 569int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 570{
 571	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
 572}
 573
 574/**
 575 * set_orig_insn - Restore the original instruction.
 576 * @mm: the probed process address space.
 577 * @auprobe: arch specific probepoint information.
 578 * @vaddr: the virtual address to insert the opcode.
 579 *
 580 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 581 * Return 0 (success) or a negative errno.
 582 */
 583int __weak
 584set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 585{
 586	return uprobe_write_opcode(auprobe, mm, vaddr,
 587			*(uprobe_opcode_t *)&auprobe->insn);
 588}
 589
 590static struct uprobe *get_uprobe(struct uprobe *uprobe)
 591{
 592	refcount_inc(&uprobe->ref);
 593	return uprobe;
 594}
 595
 596static void put_uprobe(struct uprobe *uprobe)
 597{
 598	if (refcount_dec_and_test(&uprobe->ref)) {
 599		/*
 600		 * If application munmap(exec_vma) before uprobe_unregister()
 601		 * gets called, we don't get a chance to remove uprobe from
 602		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
 603		 */
 604		mutex_lock(&delayed_uprobe_lock);
 605		delayed_uprobe_remove(uprobe, NULL);
 606		mutex_unlock(&delayed_uprobe_lock);
 607		kfree(uprobe);
 608	}
 609}
 610
 611static __always_inline
 612int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
 613	       const struct uprobe *r)
 614{
 615	if (l_inode < r->inode)
 616		return -1;
 617
 618	if (l_inode > r->inode)
 619		return 1;
 620
 621	if (l_offset < r->offset)
 622		return -1;
 623
 624	if (l_offset > r->offset)
 625		return 1;
 626
 627	return 0;
 628}
 629
 630#define __node_2_uprobe(node) \
 631	rb_entry((node), struct uprobe, rb_node)
 632
 633struct __uprobe_key {
 634	struct inode *inode;
 635	loff_t offset;
 636};
 637
 638static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
 639{
 640	const struct __uprobe_key *a = key;
 641	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
 642}
 643
 644static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
 645{
 646	struct uprobe *u = __node_2_uprobe(a);
 647	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
 648}
 649
 650static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 651{
 652	struct __uprobe_key key = {
 653		.inode = inode,
 654		.offset = offset,
 655	};
 656	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);
 657
 658	if (node)
 659		return get_uprobe(__node_2_uprobe(node));
 660
 661	return NULL;
 662}
 663
 664/*
 665 * Find a uprobe corresponding to a given inode:offset
 666 * Acquires uprobes_treelock
 667 */
 668static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 669{
 670	struct uprobe *uprobe;
 671
 672	spin_lock(&uprobes_treelock);
 673	uprobe = __find_uprobe(inode, offset);
 674	spin_unlock(&uprobes_treelock);
 675
 676	return uprobe;
 677}
 678
 679static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
 680{
 681	struct rb_node *node;
 682
 683	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
 684	if (node)
 685		return get_uprobe(__node_2_uprobe(node));
 686
 687	/* get access + creation ref */
 688	refcount_set(&uprobe->ref, 2);
 689	return NULL;
 690}
 691
 692/*
 693 * Acquire uprobes_treelock.
 694 * Matching uprobe already exists in rbtree;
 695 *	increment (access refcount) and return the matching uprobe.
 696 *
 697 * No matching uprobe; insert the uprobe in rb_tree;
 698 *	get a double refcount (access + creation) and return NULL.
 699 */
 700static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 701{
 702	struct uprobe *u;
 703
 704	spin_lock(&uprobes_treelock);
 705	u = __insert_uprobe(uprobe);
 706	spin_unlock(&uprobes_treelock);
 707
 708	return u;
 709}
 710
 711static void
 712ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
 713{
 714	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
 715		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
 716		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
 717		(unsigned long long) cur_uprobe->ref_ctr_offset,
 718		(unsigned long long) uprobe->ref_ctr_offset);
 719}
 720
 721static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
 722				   loff_t ref_ctr_offset)
 723{
 724	struct uprobe *uprobe, *cur_uprobe;
 725
 726	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
 727	if (!uprobe)
 728		return NULL;
 729
 730	uprobe->inode = inode;
 731	uprobe->offset = offset;
 732	uprobe->ref_ctr_offset = ref_ctr_offset;
 733	init_rwsem(&uprobe->register_rwsem);
 734	init_rwsem(&uprobe->consumer_rwsem);
 735
 736	/* add to uprobes_tree, sorted on inode:offset */
 737	cur_uprobe = insert_uprobe(uprobe);
 738	/* a uprobe exists for this inode:offset combination */
 739	if (cur_uprobe) {
 740		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
 741			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
 742			put_uprobe(cur_uprobe);
 743			kfree(uprobe);
 744			return ERR_PTR(-EINVAL);
 745		}
 746		kfree(uprobe);
 747		uprobe = cur_uprobe;
 748	}
 749
 750	return uprobe;
 751}
 752
 753static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 754{
 755	down_write(&uprobe->consumer_rwsem);
 756	uc->next = uprobe->consumers;
 757	uprobe->consumers = uc;
 758	up_write(&uprobe->consumer_rwsem);
 759}
 760
 761/*
 762 * For uprobe @uprobe, delete the consumer @uc.
 763 * Return true if the @uc is deleted successfully
 764 * or return false.
 765 */
 766static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 767{
 768	struct uprobe_consumer **con;
 769	bool ret = false;
 770
 771	down_write(&uprobe->consumer_rwsem);
 772	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
 773		if (*con == uc) {
 774			*con = uc->next;
 775			ret = true;
 776			break;
 777		}
 778	}
 779	up_write(&uprobe->consumer_rwsem);
 780
 781	return ret;
 782}
 783
 784static int __copy_insn(struct address_space *mapping, struct file *filp,
 785			void *insn, int nbytes, loff_t offset)
 786{
 787	struct page *page;
 788	/*
 789	 * Ensure that the page that has the original instruction is populated
 790	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
 791	 * see uprobe_register().
 792	 */
 793	if (mapping->a_ops->read_folio)
 794		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 795	else
 796		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 797	if (IS_ERR(page))
 798		return PTR_ERR(page);
 799
 800	copy_from_page(page, offset, insn, nbytes);
 801	put_page(page);
 802
 803	return 0;
 804}
 805
 806static int copy_insn(struct uprobe *uprobe, struct file *filp)
 807{
 808	struct address_space *mapping = uprobe->inode->i_mapping;
 809	loff_t offs = uprobe->offset;
 810	void *insn = &uprobe->arch.insn;
 811	int size = sizeof(uprobe->arch.insn);
 812	int len, err = -EIO;
 813
 814	/* Copy only available bytes, -EIO if nothing was read */
 815	do {
 816		if (offs >= i_size_read(uprobe->inode))
 817			break;
 818
 819		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
 820		err = __copy_insn(mapping, filp, insn, len, offs);
 821		if (err)
 822			break;
 823
 824		insn += len;
 825		offs += len;
 826		size -= len;
 827	} while (size);
 828
 829	return err;
 830}
 831
 832static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 833				struct mm_struct *mm, unsigned long vaddr)
 834{
 835	int ret = 0;
 836
 837	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
 838		return ret;
 839
 840	/* TODO: move this into _register, until then we abuse this sem. */
 841	down_write(&uprobe->consumer_rwsem);
 842	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
 843		goto out;
 844
 845	ret = copy_insn(uprobe, file);
 846	if (ret)
 847		goto out;
 848
 849	ret = -ENOTSUPP;
 850	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
 851		goto out;
 852
 853	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
 854	if (ret)
 855		goto out;
 856
 857	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
 858	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 859
 860 out:
 861	up_write(&uprobe->consumer_rwsem);
 862
 863	return ret;
 864}
 865
 866static inline bool consumer_filter(struct uprobe_consumer *uc,
 867				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 868{
 869	return !uc->filter || uc->filter(uc, ctx, mm);
 870}
 871
 872static bool filter_chain(struct uprobe *uprobe,
 873			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 874{
 875	struct uprobe_consumer *uc;
 876	bool ret = false;
 877
 878	down_read(&uprobe->consumer_rwsem);
 879	for (uc = uprobe->consumers; uc; uc = uc->next) {
 880		ret = consumer_filter(uc, ctx, mm);
 881		if (ret)
 882			break;
 883	}
 884	up_read(&uprobe->consumer_rwsem);
 885
 886	return ret;
 887}
 888
 889static int
 890install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 891			struct vm_area_struct *vma, unsigned long vaddr)
 892{
 893	bool first_uprobe;
 894	int ret;
 895
 896	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
 897	if (ret)
 898		return ret;
 899
 900	/*
 901	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
 902	 * the task can hit this breakpoint right after __replace_page().
 903	 */
 904	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
 905	if (first_uprobe)
 906		set_bit(MMF_HAS_UPROBES, &mm->flags);
 907
 908	ret = set_swbp(&uprobe->arch, mm, vaddr);
 909	if (!ret)
 910		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
 911	else if (first_uprobe)
 912		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 913
 914	return ret;
 915}
 916
 917static int
 918remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 919{
 920	set_bit(MMF_RECALC_UPROBES, &mm->flags);
 921	return set_orig_insn(&uprobe->arch, mm, vaddr);
 922}
 923
 924static inline bool uprobe_is_active(struct uprobe *uprobe)
 925{
 926	return !RB_EMPTY_NODE(&uprobe->rb_node);
 927}
 928/*
 929 * There could be threads that have already hit the breakpoint. They
 930 * will recheck the current insn and restart if find_uprobe() fails.
 931 * See find_active_uprobe().
 932 */
 933static void delete_uprobe(struct uprobe *uprobe)
 934{
 935	if (WARN_ON(!uprobe_is_active(uprobe)))
 936		return;
 937
 938	spin_lock(&uprobes_treelock);
 939	rb_erase(&uprobe->rb_node, &uprobes_tree);
 940	spin_unlock(&uprobes_treelock);
 941	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
 942	put_uprobe(uprobe);
 943}
 944
 945struct map_info {
 946	struct map_info *next;
 947	struct mm_struct *mm;
 948	unsigned long vaddr;
 949};
 950
 951static inline struct map_info *free_map_info(struct map_info *info)
 952{
 953	struct map_info *next = info->next;
 954	kfree(info);
 955	return next;
 956}
 957
 958static struct map_info *
 959build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 960{
 961	unsigned long pgoff = offset >> PAGE_SHIFT;
 962	struct vm_area_struct *vma;
 963	struct map_info *curr = NULL;
 964	struct map_info *prev = NULL;
 965	struct map_info *info;
 966	int more = 0;
 967
 968 again:
 969	i_mmap_lock_read(mapping);
 970	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 971		if (!valid_vma(vma, is_register))
 972			continue;
 973
 974		if (!prev && !more) {
 975			/*
 976			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
 977			 * reclaim. This is optimistic, no harm done if it fails.
 978			 */
 979			prev = kmalloc(sizeof(struct map_info),
 980					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
 981			if (prev)
 982				prev->next = NULL;
 983		}
 984		if (!prev) {
 985			more++;
 986			continue;
 987		}
 988
 989		if (!mmget_not_zero(vma->vm_mm))
 990			continue;
 991
 992		info = prev;
 993		prev = prev->next;
 994		info->next = curr;
 995		curr = info;
 996
 997		info->mm = vma->vm_mm;
 998		info->vaddr = offset_to_vaddr(vma, offset);
 999	}
1000	i_mmap_unlock_read(mapping);
1001
1002	if (!more)
1003		goto out;
1004
1005	prev = curr;
1006	while (curr) {
1007		mmput(curr->mm);
1008		curr = curr->next;
1009	}
1010
1011	do {
1012		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
1013		if (!info) {
1014			curr = ERR_PTR(-ENOMEM);
1015			goto out;
1016		}
1017		info->next = prev;
1018		prev = info;
1019	} while (--more);
1020
1021	goto again;
1022 out:
1023	while (prev)
1024		prev = free_map_info(prev);
1025	return curr;
1026}
1027
1028static int
1029register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
1030{
1031	bool is_register = !!new;
1032	struct map_info *info;
1033	int err = 0;
1034
1035	percpu_down_write(&dup_mmap_sem);
1036	info = build_map_info(uprobe->inode->i_mapping,
1037					uprobe->offset, is_register);
1038	if (IS_ERR(info)) {
1039		err = PTR_ERR(info);
1040		goto out;
1041	}
1042
1043	while (info) {
1044		struct mm_struct *mm = info->mm;
1045		struct vm_area_struct *vma;
1046
1047		if (err && is_register)
1048			goto free;
1049
1050		mmap_write_lock(mm);
1051		vma = find_vma(mm, info->vaddr);
1052		if (!vma || !valid_vma(vma, is_register) ||
1053		    file_inode(vma->vm_file) != uprobe->inode)
1054			goto unlock;
1055
1056		if (vma->vm_start > info->vaddr ||
1057		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
1058			goto unlock;
1059
1060		if (is_register) {
1061			/* consult only the "caller", new consumer. */
1062			if (consumer_filter(new,
1063					UPROBE_FILTER_REGISTER, mm))
1064				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
1065		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
1066			if (!filter_chain(uprobe,
1067					UPROBE_FILTER_UNREGISTER, mm))
1068				err |= remove_breakpoint(uprobe, mm, info->vaddr);
1069		}
1070
1071 unlock:
1072		mmap_write_unlock(mm);
1073 free:
1074		mmput(mm);
1075		info = free_map_info(info);
1076	}
1077 out:
1078	percpu_up_write(&dup_mmap_sem);
1079	return err;
1080}
1081
1082static void
1083__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
1084{
1085	int err;
1086
1087	if (WARN_ON(!consumer_del(uprobe, uc)))
1088		return;
1089
1090	err = register_for_each_vma(uprobe, NULL);
1091	/* TODO: can't unregister? schedule a worker thread */
1092	if (!uprobe->consumers && !err)
1093		delete_uprobe(uprobe);
1094}
1095
1096/*
1097 * uprobe_unregister - unregister an already registered probe.
1098 * @inode: the file in which the probe has to be removed.
1099 * @offset: offset from the start of the file.
1100 * @uc: identify which probe if multiple probes are colocated.
1101 */
1102void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
1103{
1104	struct uprobe *uprobe;
1105
1106	uprobe = find_uprobe(inode, offset);
1107	if (WARN_ON(!uprobe))
1108		return;
1109
1110	down_write(&uprobe->register_rwsem);
1111	__uprobe_unregister(uprobe, uc);
1112	up_write(&uprobe->register_rwsem);
1113	put_uprobe(uprobe);
1114}
1115EXPORT_SYMBOL_GPL(uprobe_unregister);
1116
1117/*
1118 * __uprobe_register - register a probe
1119 * @inode: the file in which the probe has to be placed.
1120 * @offset: offset from the start of the file.
1121 * @uc: information on how to handle the probe.
1122 *
1123 * Apart from the access refcount, __uprobe_register() takes a creation
1124 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
1125 * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
1126 * tuple).  Creation refcount stops uprobe_unregister from freeing the
1127 * @uprobe even before the register operation is complete. Creation
1128 * refcount is released when the last @uc for the @uprobe
1129 * unregisters. Caller of __uprobe_register() is required to keep @inode
1130 * (and the containing mount) referenced.
1131 *
1132 * Return errno if it cannot successfully install probes
1133 * else return 0 (success)
1134 */
1135static int __uprobe_register(struct inode *inode, loff_t offset,
1136			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1137{
1138	struct uprobe *uprobe;
1139	int ret;
1140
1141	/* Uprobe must have at least one set consumer */
1142	if (!uc->handler && !uc->ret_handler)
1143		return -EINVAL;
1144
1145	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
1146	if (!inode->i_mapping->a_ops->read_folio &&
1147	    !shmem_mapping(inode->i_mapping))
1148		return -EIO;
1149	/* Racy, just to catch the obvious mistakes */
1150	if (offset > i_size_read(inode))
1151		return -EINVAL;
1152
1153	/*
1154	 * This ensures that copy_from_page(), copy_to_page() and
1155	 * __update_ref_ctr() can't cross page boundary.
1156	 */
1157	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
1158		return -EINVAL;
1159	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
1160		return -EINVAL;
1161
1162 retry:
1163	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
1164	if (!uprobe)
1165		return -ENOMEM;
1166	if (IS_ERR(uprobe))
1167		return PTR_ERR(uprobe);
1168
1169	/*
1170	 * We can race with uprobe_unregister()->delete_uprobe().
1171	 * Check uprobe_is_active() and retry if it is false.
1172	 */
1173	down_write(&uprobe->register_rwsem);
1174	ret = -EAGAIN;
1175	if (likely(uprobe_is_active(uprobe))) {
1176		consumer_add(uprobe, uc);
1177		ret = register_for_each_vma(uprobe, uc);
1178		if (ret)
1179			__uprobe_unregister(uprobe, uc);
1180	}
1181	up_write(&uprobe->register_rwsem);
1182	put_uprobe(uprobe);
1183
1184	if (unlikely(ret == -EAGAIN))
1185		goto retry;
1186	return ret;
1187}
1188
1189int uprobe_register(struct inode *inode, loff_t offset,
1190		    struct uprobe_consumer *uc)
1191{
1192	return __uprobe_register(inode, offset, 0, uc);
1193}
1194EXPORT_SYMBOL_GPL(uprobe_register);
1195
1196int uprobe_register_refctr(struct inode *inode, loff_t offset,
1197			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1198{
1199	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
1200}
1201EXPORT_SYMBOL_GPL(uprobe_register_refctr);
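/*
 * Editor's illustration (not part of the original source): how an in-kernel
 * user might drive the exported API above.  Everything except
 * uprobe_register() and uprobe_unregister() is hypothetical; the inode would
 * normally come from a struct file or a path lookup, and the offset is the
 * file offset of the instruction to probe.
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	return 0;			/* keep the breakpoint installed */
}

static struct uprobe_consumer example_consumer = {
	.handler	= example_handler,
};

static int example_attach(struct inode *probe_inode, loff_t probe_offset)
{
	return uprobe_register(probe_inode, probe_offset, &example_consumer);
}

static void example_detach(struct inode *probe_inode, loff_t probe_offset)
{
	uprobe_unregister(probe_inode, probe_offset, &example_consumer);
}
#endif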
1202
1203/*
1204 * uprobe_apply - add or remove the breakpoints of an already registered probe.
1205 * @inode: the file in which the probe resides.
1206 * @offset: offset from the start of the file.
1207 * @uc: consumer which wants to add more or remove some breakpoints
1208 * @add: add or remove the breakpoints
1209 */
1210int uprobe_apply(struct inode *inode, loff_t offset,
1211			struct uprobe_consumer *uc, bool add)
1212{
1213	struct uprobe *uprobe;
1214	struct uprobe_consumer *con;
1215	int ret = -ENOENT;
1216
1217	uprobe = find_uprobe(inode, offset);
1218	if (WARN_ON(!uprobe))
1219		return ret;
1220
1221	down_write(&uprobe->register_rwsem);
1222	for (con = uprobe->consumers; con && con != uc ; con = con->next)
1223		;
1224	if (con)
1225		ret = register_for_each_vma(uprobe, add ? uc : NULL);
1226	up_write(&uprobe->register_rwsem);
1227	put_uprobe(uprobe);
1228
1229	return ret;
1230}
1231
1232static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1233{
1234	VMA_ITERATOR(vmi, mm, 0);
1235	struct vm_area_struct *vma;
1236	int err = 0;
1237
1238	mmap_read_lock(mm);
1239	for_each_vma(vmi, vma) {
1240		unsigned long vaddr;
1241		loff_t offset;
1242
1243		if (!valid_vma(vma, false) ||
1244		    file_inode(vma->vm_file) != uprobe->inode)
1245			continue;
1246
1247		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1248		if (uprobe->offset <  offset ||
1249		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1250			continue;
1251
1252		vaddr = offset_to_vaddr(vma, uprobe->offset);
1253		err |= remove_breakpoint(uprobe, mm, vaddr);
1254	}
1255	mmap_read_unlock(mm);
1256
1257	return err;
1258}
1259
1260static struct rb_node *
1261find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1262{
1263	struct rb_node *n = uprobes_tree.rb_node;
1264
1265	while (n) {
1266		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1267
1268		if (inode < u->inode) {
1269			n = n->rb_left;
1270		} else if (inode > u->inode) {
1271			n = n->rb_right;
1272		} else {
1273			if (max < u->offset)
1274				n = n->rb_left;
1275			else if (min > u->offset)
1276				n = n->rb_right;
1277			else
1278				break;
1279		}
1280	}
1281
1282	return n;
1283}
1284
1285/*
1286 * For a given range in vma, build a list of probes that need to be inserted.
1287 */
1288static void build_probe_list(struct inode *inode,
1289				struct vm_area_struct *vma,
1290				unsigned long start, unsigned long end,
1291				struct list_head *head)
1292{
1293	loff_t min, max;
1294	struct rb_node *n, *t;
1295	struct uprobe *u;
1296
1297	INIT_LIST_HEAD(head);
1298	min = vaddr_to_offset(vma, start);
1299	max = min + (end - start) - 1;
1300
1301	spin_lock(&uprobes_treelock);
1302	n = find_node_in_range(inode, min, max);
1303	if (n) {
1304		for (t = n; t; t = rb_prev(t)) {
1305			u = rb_entry(t, struct uprobe, rb_node);
1306			if (u->inode != inode || u->offset < min)
1307				break;
1308			list_add(&u->pending_list, head);
1309			get_uprobe(u);
1310		}
1311		for (t = n; (t = rb_next(t)); ) {
1312			u = rb_entry(t, struct uprobe, rb_node);
1313			if (u->inode != inode || u->offset > max)
1314				break;
1315			list_add(&u->pending_list, head);
1316			get_uprobe(u);
1317		}
1318	}
1319	spin_unlock(&uprobes_treelock);
1320}
1321
1322/* @vma contains reference counter, not the probed instruction. */
1323static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
1324{
1325	struct list_head *pos, *q;
1326	struct delayed_uprobe *du;
1327	unsigned long vaddr;
1328	int ret = 0, err = 0;
1329
1330	mutex_lock(&delayed_uprobe_lock);
1331	list_for_each_safe(pos, q, &delayed_uprobe_list) {
1332		du = list_entry(pos, struct delayed_uprobe, list);
1333
1334		if (du->mm != vma->vm_mm ||
1335		    !valid_ref_ctr_vma(du->uprobe, vma))
1336			continue;
1337
1338		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
1339		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
1340		if (ret) {
1341			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
1342			if (!err)
1343				err = ret;
1344		}
1345		delayed_uprobe_delete(du);
1346	}
1347	mutex_unlock(&delayed_uprobe_lock);
1348	return err;
1349}
1350
1351/*
1352 * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
1353 *
1354 * Currently we ignore all errors and always return 0; the callers
1355 * can't handle the failure anyway.
1356 */
1357int uprobe_mmap(struct vm_area_struct *vma)
1358{
1359	struct list_head tmp_list;
1360	struct uprobe *uprobe, *u;
1361	struct inode *inode;
1362
1363	if (no_uprobe_events())
1364		return 0;
1365
1366	if (vma->vm_file &&
1367	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
1368	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
1369		delayed_ref_ctr_inc(vma);
1370
1371	if (!valid_vma(vma, true))
1372		return 0;
1373
1374	inode = file_inode(vma->vm_file);
1375	if (!inode)
1376		return 0;
1377
1378	mutex_lock(uprobes_mmap_hash(inode));
1379	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1380	/*
1381	 * We can race with uprobe_unregister(), this uprobe can be already
1382	 * removed. But in this case filter_chain() must return false, all
1383	 * consumers have gone away.
1384	 */
1385	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1386		if (!fatal_signal_pending(current) &&
1387		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1388			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1389			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1390		}
1391		put_uprobe(uprobe);
1392	}
1393	mutex_unlock(uprobes_mmap_hash(inode));
1394
1395	return 0;
1396}
1397
1398static bool
1399vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1400{
1401	loff_t min, max;
1402	struct inode *inode;
1403	struct rb_node *n;
1404
1405	inode = file_inode(vma->vm_file);
1406
1407	min = vaddr_to_offset(vma, start);
1408	max = min + (end - start) - 1;
1409
1410	spin_lock(&uprobes_treelock);
1411	n = find_node_in_range(inode, min, max);
1412	spin_unlock(&uprobes_treelock);
1413
1414	return !!n;
1415}
1416
1417/*
1418 * Called in context of a munmap of a vma.
1419 */
1420void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1421{
1422	if (no_uprobe_events() || !valid_vma(vma, false))
1423		return;
1424
1425	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1426		return;
1427
1428	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1429	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1430		return;
1431
1432	if (vma_has_uprobes(vma, start, end))
1433		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1434}
1435
1436/* Slot allocation for XOL */
1437static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1438{
1439	struct vm_area_struct *vma;
1440	int ret;
1441
1442	if (mmap_write_lock_killable(mm))
1443		return -EINTR;
1444
1445	if (mm->uprobes_state.xol_area) {
1446		ret = -EALREADY;
1447		goto fail;
1448	}
1449
1450	if (!area->vaddr) {
1451		/* Try to map as high as possible, this is only a hint. */
1452		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1453						PAGE_SIZE, 0, 0);
1454		if (IS_ERR_VALUE(area->vaddr)) {
1455			ret = area->vaddr;
1456			goto fail;
1457		}
1458	}
1459
1460	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1461				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1462				&area->xol_mapping);
1463	if (IS_ERR(vma)) {
1464		ret = PTR_ERR(vma);
1465		goto fail;
1466	}
1467
1468	ret = 0;
1469	/* pairs with get_xol_area() */
1470	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1471 fail:
1472	mmap_write_unlock(mm);
1473
1474	return ret;
1475}
1476
1477static struct xol_area *__create_xol_area(unsigned long vaddr)
1478{
1479	struct mm_struct *mm = current->mm;
1480	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1481	struct xol_area *area;
1482
1483	area = kmalloc(sizeof(*area), GFP_KERNEL);
1484	if (unlikely(!area))
1485		goto out;
1486
1487	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
1488			       GFP_KERNEL);
1489	if (!area->bitmap)
1490		goto free_area;
1491
1492	area->xol_mapping.name = "[uprobes]";
1493	area->xol_mapping.fault = NULL;
1494	area->xol_mapping.pages = area->pages;
1495	area->pages[0] = alloc_page(GFP_HIGHUSER);
1496	if (!area->pages[0])
1497		goto free_bitmap;
1498	area->pages[1] = NULL;
1499
1500	area->vaddr = vaddr;
1501	init_waitqueue_head(&area->wq);
1502	/* Reserve the 1st slot for get_trampoline_vaddr() */
1503	set_bit(0, area->bitmap);
1504	atomic_set(&area->slot_count, 1);
1505	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1506
1507	if (!xol_add_vma(mm, area))
1508		return area;
1509
1510	__free_page(area->pages[0]);
1511 free_bitmap:
1512	kfree(area->bitmap);
1513 free_area:
1514	kfree(area);
1515 out:
1516	return NULL;
1517}
1518
1519/*
1520 * get_xol_area - Allocate process's xol_area if necessary.
1521 * This area will be used for storing instructions for execution out of line.
1522 *
1523 * Returns the allocated area or NULL.
1524 */
1525static struct xol_area *get_xol_area(void)
1526{
1527	struct mm_struct *mm = current->mm;
1528	struct xol_area *area;
1529
1530	if (!mm->uprobes_state.xol_area)
1531		__create_xol_area(0);
1532
1533	/* Pairs with xol_add_vma() smp_store_release() */
1534	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
1535	return area;
1536}
1537
1538/*
1539 * uprobe_clear_state - Free the area allocated for slots.
1540 */
1541void uprobe_clear_state(struct mm_struct *mm)
1542{
1543	struct xol_area *area = mm->uprobes_state.xol_area;
1544
1545	mutex_lock(&delayed_uprobe_lock);
1546	delayed_uprobe_remove(NULL, mm);
1547	mutex_unlock(&delayed_uprobe_lock);
1548
1549	if (!area)
1550		return;
1551
1552	put_page(area->pages[0]);
1553	kfree(area->bitmap);
1554	kfree(area);
1555}
1556
1557void uprobe_start_dup_mmap(void)
1558{
1559	percpu_down_read(&dup_mmap_sem);
1560}
1561
1562void uprobe_end_dup_mmap(void)
1563{
1564	percpu_up_read(&dup_mmap_sem);
1565}
1566
1567void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1568{
1569	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1570		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1571		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1572		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1573	}
1574}
1575
1576/*
1577 *  - search for a free slot.
1578 */
1579static unsigned long xol_take_insn_slot(struct xol_area *area)
1580{
1581	unsigned long slot_addr;
1582	int slot_nr;
1583
1584	do {
1585		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1586		if (slot_nr < UINSNS_PER_PAGE) {
1587			if (!test_and_set_bit(slot_nr, area->bitmap))
1588				break;
1589
1590			slot_nr = UINSNS_PER_PAGE;
1591			continue;
1592		}
1593		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1594	} while (slot_nr >= UINSNS_PER_PAGE);
1595
1596	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1597	atomic_inc(&area->slot_count);
1598
1599	return slot_addr;
1600}
1601
1602/*
1603 * xol_get_insn_slot - allocate a slot for xol.
1604 * Returns the allocated slot address or 0.
1605 */
1606static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1607{
1608	struct xol_area *area;
1609	unsigned long xol_vaddr;
1610
1611	area = get_xol_area();
1612	if (!area)
1613		return 0;
1614
1615	xol_vaddr = xol_take_insn_slot(area);
1616	if (unlikely(!xol_vaddr))
1617		return 0;
1618
1619	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1620			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1621
1622	return xol_vaddr;
1623}
1624
1625/*
1626 * xol_free_insn_slot - If the slot was earlier allocated by
1627 * xol_get_insn_slot(), make the slot available for
1628 * subsequent requests.
1629 */
1630static void xol_free_insn_slot(struct task_struct *tsk)
1631{
1632	struct xol_area *area;
1633	unsigned long vma_end;
1634	unsigned long slot_addr;
1635
1636	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1637		return;
1638
1639	slot_addr = tsk->utask->xol_vaddr;
1640	if (unlikely(!slot_addr))
1641		return;
1642
1643	area = tsk->mm->uprobes_state.xol_area;
1644	vma_end = area->vaddr + PAGE_SIZE;
1645	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1646		unsigned long offset;
1647		int slot_nr;
1648
1649		offset = slot_addr - area->vaddr;
1650		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1651		if (slot_nr >= UINSNS_PER_PAGE)
1652			return;
1653
1654		clear_bit(slot_nr, area->bitmap);
1655		atomic_dec(&area->slot_count);
1656		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1657		if (waitqueue_active(&area->wq))
1658			wake_up(&area->wq);
1659
1660		tsk->utask->xol_vaddr = 0;
1661	}
1662}
1663
1664void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1665				  void *src, unsigned long len)
1666{
1667	/* Initialize the slot */
1668	copy_to_page(page, vaddr, src, len);
1669
1670	/*
1671	 * We probably need flush_icache_user_page() but it needs vma.
1672	 * This should work on most architectures by default. If an
1673	 * architecture needs to do something different it can define
1674	 * its own version of the function.
1675	 */
1676	flush_dcache_page(page);
1677}
1678
1679/**
1680 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1681 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1682 * instruction.
1683 * Return: the address of the breakpoint instruction.
1684 */
1685unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1686{
1687	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1688}
1689
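/*
 * If the task is single-stepping an instruction out of line, report the
 * original probed address instead of the xol slot the trap came from.
 */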
1690unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1691{
1692	struct uprobe_task *utask = current->utask;
1693
1694	if (unlikely(utask && utask->active_uprobe))
1695		return utask->vaddr;
1696
1697	return instruction_pointer(regs);
1698}
1699
1700static struct return_instance *free_ret_instance(struct return_instance *ri)
1701{
1702	struct return_instance *next = ri->next;
1703	put_uprobe(ri->uprobe);
1704	kfree(ri);
1705	return next;
1706}
1707
1708/*
1709 * Called with no locks held.
1710 * Called in context of an exiting or an exec-ing thread.
1711 */
1712void uprobe_free_utask(struct task_struct *t)
1713{
1714	struct uprobe_task *utask = t->utask;
1715	struct return_instance *ri;
1716
1717	if (!utask)
1718		return;
1719
1720	if (utask->active_uprobe)
1721		put_uprobe(utask->active_uprobe);
1722
1723	ri = utask->return_instances;
1724	while (ri)
1725		ri = free_ret_instance(ri);
1726
1727	xol_free_insn_slot(t);
1728	kfree(utask);
1729	t->utask = NULL;
1730}
1731
1732/*
1733 * Allocate a uprobe_task object for the task if necessary.
1734 * Called when the thread hits a breakpoint.
1735 *
1736 * Returns:
1737 * - pointer to new uprobe_task on success
1738 * - NULL otherwise
1739 */
1740static struct uprobe_task *get_utask(void)
1741{
1742	if (!current->utask)
1743		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1744	return current->utask;
1745}
1746
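/*
 * Duplicate the parent's chain of return_instances for the new task at
 * fork time; each copied entry takes its own reference on the uprobe.
 */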
1747static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1748{
1749	struct uprobe_task *n_utask;
1750	struct return_instance **p, *o, *n;
1751
1752	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1753	if (!n_utask)
1754		return -ENOMEM;
1755	t->utask = n_utask;
1756
1757	p = &n_utask->return_instances;
1758	for (o = o_utask->return_instances; o; o = o->next) {
1759		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1760		if (!n)
1761			return -ENOMEM;
1762
1763		*n = *o;
1764		get_uprobe(n->uprobe);
1765		n->next = NULL;
1766
1767		*p = n;
1768		p = &n->next;
1769		n_utask->depth++;
1770	}
1771
1772	return 0;
1773}
1774
1775static void uprobe_warn(struct task_struct *t, const char *msg)
1776{
1777	pr_warn("uprobe: %s:%d failed to %s\n",
1778			current->comm, current->pid, msg);
1779}
1780
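/*
 * task_work callback queued by uprobe_copy_process(): create the xol area
 * in the new process at the same address the parent used.
 */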
1781static void dup_xol_work(struct callback_head *work)
1782{
1783	if (current->flags & PF_EXITING)
1784		return;
1785
1786	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1787			!fatal_signal_pending(current))
1788		uprobe_warn(current, "dup xol area");
1789}
1790
1791/*
1792 * Called in context of a new clone/fork from copy_process.
1793 */
1794void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1795{
1796	struct uprobe_task *utask = current->utask;
1797	struct mm_struct *mm = current->mm;
1798	struct xol_area *area;
1799
1800	t->utask = NULL;
1801
1802	if (!utask || !utask->return_instances)
1803		return;
1804
1805	if (mm == t->mm && !(flags & CLONE_VFORK))
1806		return;
1807
1808	if (dup_utask(t, utask))
1809		return uprobe_warn(t, "dup ret instances");
1810
1811	/* The task can fork() after dup_xol_work() fails */
1812	area = mm->uprobes_state.xol_area;
1813	if (!area)
1814		return uprobe_warn(t, "dup xol area");
1815
1816	if (mm == t->mm)
1817		return;
1818
1819	t->utask->dup_xol_addr = area->vaddr;
1820	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1821	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
1822}
1823
1824/*
1825 * The current area->vaddr notion assumes the trampoline address is
1826 * always equal to area->vaddr.
1827 *
1828 * Returns -1 in case the xol_area is not allocated.
1829 */
1830static unsigned long get_trampoline_vaddr(void)
1831{
1832	struct xol_area *area;
1833	unsigned long trampoline_vaddr = -1;
1834
1835	/* Pairs with xol_add_vma() smp_store_release() */
1836	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1837	if (area)
1838		trampoline_vaddr = area->vaddr;
1839
1840	return trampoline_vaddr;
1841}
1842
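/*
 * Drop the return_instances whose stack frames are no longer alive (e.g.
 * discarded by longjmp()), as reported by arch_uretprobe_is_alive().
 */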
1843static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1844					struct pt_regs *regs)
1845{
1846	struct return_instance *ri = utask->return_instances;
1847	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1848
1849	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1850		ri = free_ret_instance(ri);
1851		utask->depth--;
1852	}
1853	utask->return_instances = ri;
1854}
1855
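/*
 * Hijack the return address of the just-probed function so that it returns
 * through the xol trampoline, and remember the original return address in
 * a new return_instance on the utask's stack of pending uretprobes.
 */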
1856static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1857{
1858	struct return_instance *ri;
1859	struct uprobe_task *utask;
1860	unsigned long orig_ret_vaddr, trampoline_vaddr;
1861	bool chained;
1862
1863	if (!get_xol_area())
1864		return;
1865
1866	utask = get_utask();
1867	if (!utask)
1868		return;
1869
1870	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1871		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1872				" nestedness limit pid/tgid=%d/%d\n",
1873				current->pid, current->tgid);
1874		return;
1875	}
1876
1877	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1878	if (!ri)
1879		return;
1880
1881	trampoline_vaddr = get_trampoline_vaddr();
1882	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1883	if (orig_ret_vaddr == -1)
1884		goto fail;
1885
1886	/* drop the entries invalidated by longjmp() */
1887	chained = (orig_ret_vaddr == trampoline_vaddr);
1888	cleanup_return_instances(utask, chained, regs);
1889
1890	/*
1891	 * We don't want to keep the trampoline address on the stack; rather,
1892	 * keep the original return address of the first caller through all the
1893	 * subsequent instances. This also makes breakpoint unwinding easier.
1894	 */
1895	if (chained) {
1896		if (!utask->return_instances) {
1897			/*
1898			 * This should not be possible: most likely we are being
1899			 * attacked from user space.
1900			 */
1901			uprobe_warn(current, "handle tail call");
1902			goto fail;
1903		}
1904		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1905	}
1906
1907	ri->uprobe = get_uprobe(uprobe);
1908	ri->func = instruction_pointer(regs);
1909	ri->stack = user_stack_pointer(regs);
1910	ri->orig_ret_vaddr = orig_ret_vaddr;
1911	ri->chained = chained;
1912
1913	utask->depth++;
1914	ri->next = utask->return_instances;
1915	utask->return_instances = ri;
1916
1917	return;
1918 fail:
1919	kfree(ri);
1920}
1921
1922/* Prepare to single-step probed instruction out of line. */
1923static int
1924pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1925{
1926	struct uprobe_task *utask;
1927	unsigned long xol_vaddr;
1928	int err;
1929
1930	utask = get_utask();
1931	if (!utask)
1932		return -ENOMEM;
1933
1934	xol_vaddr = xol_get_insn_slot(uprobe);
1935	if (!xol_vaddr)
1936		return -ENOMEM;
1937
1938	utask->xol_vaddr = xol_vaddr;
1939	utask->vaddr = bp_vaddr;
1940
1941	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1942	if (unlikely(err)) {
1943		xol_free_insn_slot(current);
1944		return err;
1945	}
1946
1947	utask->active_uprobe = uprobe;
1948	utask->state = UTASK_SSTEP;
1949	return 0;
1950}
1951
1952/*
1953 * If we are singlestepping, then ensure this thread is not connected to
1954 * non-fatal signals until completion of singlestep.  When xol insn itself
1955 * triggers the signal, restart the original insn even if the task is
1956 * already SIGKILL'ed (since the coredump should report the correct ip).  This
1957 * is even more important if the task has a handler for SIGSEGV/etc: the
1958 * _same_ instruction should be repeated again after return from the signal
1959 * handler, and SSTEP can never finish in this case.
1960 */
1961bool uprobe_deny_signal(void)
1962{
1963	struct task_struct *t = current;
1964	struct uprobe_task *utask = t->utask;
1965
1966	if (likely(!utask || !utask->active_uprobe))
1967		return false;
1968
1969	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1970
1971	if (task_sigpending(t)) {
1972		spin_lock_irq(&t->sighand->siglock);
1973		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1974		spin_unlock_irq(&t->sighand->siglock);
1975
1976		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1977			utask->state = UTASK_SSTEP_TRAPPED;
1978			set_tsk_thread_flag(t, TIF_UPROBE);
1979		}
1980	}
1981
1982	return true;
1983}
1984
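/*
 * Clear MMF_HAS_UPROBES if none of the vmas in this mm can contain a
 * breakpoint any more; called when MMF_RECALC_UPROBES is set.
 */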
1985static void mmf_recalc_uprobes(struct mm_struct *mm)
1986{
1987	VMA_ITERATOR(vmi, mm, 0);
1988	struct vm_area_struct *vma;
1989
1990	for_each_vma(vmi, vma) {
1991		if (!valid_vma(vma, false))
1992			continue;
1993		/*
1994		 * This is not strictly accurate: we can race with
1995		 * uprobe_unregister() and see the already removed
1996		 * uprobe if delete_uprobe() was not yet called.
1997		 * Or this uprobe can be filtered out.
1998		 */
1999		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2000			return;
2001	}
2002
2003	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2004}
2005
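/*
 * Check whether the instruction at @vaddr is (a variant of) the breakpoint
 * instruction.  Returns a negative errno if the memory cannot be read.
 */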
2006static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2007{
2008	struct page *page;
2009	uprobe_opcode_t opcode;
2010	int result;
2011
2012	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
2013		return -EINVAL;
2014
2015	pagefault_disable();
2016	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2017	pagefault_enable();
2018
2019	if (likely(result == 0))
2020		goto out;
2021
2022	/*
2023	 * 'mm' *is* current->mm, but we treat this as a 'remote' access
2024	 * since it is essentially a kernel access to the memory rather
2025	 * than a fault taken by the task itself; get_user_pages_remote()
2026	 * no longer takes a task argument.
2027	 */
2028	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL);
2029	if (result < 0)
2030		return result;
2031
2032	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2033	put_page(page);
2034 out:
2035	/* This needs to return true for any variant of the trap insn */
2036	return is_trap_insn(&opcode);
2037}
2038
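/*
 * Look up the uprobe registered at the file offset backing @bp_vaddr.  If
 * none is found, *is_swbp reports whether a stray breakpoint instruction
 * is still present there (> 0), is absent (0), or the address could not
 * be checked (negative errno; -EFAULT if it is not mapped at all).
 */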
2039static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
2040{
2041	struct mm_struct *mm = current->mm;
2042	struct uprobe *uprobe = NULL;
2043	struct vm_area_struct *vma;
2044
2045	mmap_read_lock(mm);
2046	vma = vma_lookup(mm, bp_vaddr);
2047	if (vma) {
2048		if (valid_vma(vma, false)) {
2049			struct inode *inode = file_inode(vma->vm_file);
2050			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2051
2052			uprobe = find_uprobe(inode, offset);
2053		}
2054
2055		if (!uprobe)
2056			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2057	} else {
2058		*is_swbp = -EFAULT;
2059	}
2060
2061	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2062		mmf_recalc_uprobes(mm);
2063	mmap_read_unlock(mm);
2064
2065	return uprobe;
2066}
2067
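/*
 * Run every consumer's handler for this hit.  A consumer votes to remove
 * the breakpoint by returning UPROBE_HANDLER_REMOVE; the uprobe is
 * unapplied from this mm only if all consumers agree.  If any consumer
 * has a ret_handler, arm a uretprobe before returning to user space.
 */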
2068static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2069{
2070	struct uprobe_consumer *uc;
2071	int remove = UPROBE_HANDLER_REMOVE;
2072	bool need_prep = false; /* prepare return uprobe, when needed */
2073
2074	down_read(&uprobe->register_rwsem);
2075	for (uc = uprobe->consumers; uc; uc = uc->next) {
2076		int rc = 0;
2077
2078		if (uc->handler) {
2079			rc = uc->handler(uc, regs);
2080			WARN(rc & ~UPROBE_HANDLER_MASK,
2081				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2082		}
2083
2084		if (uc->ret_handler)
2085			need_prep = true;
2086
2087		remove &= rc;
2088	}
2089
2090	if (need_prep && !remove)
2091		prepare_uretprobe(uprobe, regs); /* put bp at return */
2092
2093	if (remove && uprobe->consumers) {
2094		WARN_ON(!uprobe_is_active(uprobe));
2095		unapply_uprobe(uprobe, current->mm);
2096	}
2097	up_read(&uprobe->register_rwsem);
2098}
2099
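/* Invoke the ret_handler of every consumer for this return instance. */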
2100static void
2101handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2102{
2103	struct uprobe *uprobe = ri->uprobe;
2104	struct uprobe_consumer *uc;
2105
2106	down_read(&uprobe->register_rwsem);
2107	for (uc = uprobe->consumers; uc; uc = uc->next) {
2108		if (uc->ret_handler)
2109			uc->ret_handler(uc, ri->func, regs);
2110	}
2111	up_read(&uprobe->register_rwsem);
2112}
2113
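/*
 * Skip the instances that share one hijacked return address (a "chain")
 * and return the first instance of the next chain, or NULL.
 */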
2114static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2115{
2116	bool chained;
2117
2118	do {
2119		chained = ri->chained;
2120		ri = ri->next;	/* can't be NULL if chained */
2121	} while (chained);
2122
2123	return ri;
2124}
2125
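/*
 * The probed function has returned to the trampoline address installed by
 * prepare_uretprobe(): run the ret_handlers of the live return_instances,
 * restore the original return address and discard the consumed instances.
 */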
2126static void handle_trampoline(struct pt_regs *regs)
2127{
2128	struct uprobe_task *utask;
2129	struct return_instance *ri, *next;
2130	bool valid;
2131
2132	utask = current->utask;
2133	if (!utask)
2134		goto sigill;
2135
2136	ri = utask->return_instances;
2137	if (!ri)
2138		goto sigill;
2139
2140	do {
2141		/*
2142		 * We should throw out the frames invalidated by longjmp().
2143		 * If this chain is valid, then the next one should be alive
2144		 * or NULL; the latter case means that nobody but ri->func
2145		 * could hit this trampoline on return. TODO: sigaltstack().
2146		 */
2147		next = find_next_ret_chain(ri);
2148		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
2149
2150		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2151		do {
2152			if (valid)
2153				handle_uretprobe_chain(ri, regs);
2154			ri = free_ret_instance(ri);
2155			utask->depth--;
2156		} while (ri != next);
2157	} while (!valid);
2158
2159	utask->return_instances = ri;
2160	return;
2161
2162 sigill:
2163	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
2164	force_sig(SIGILL);
2165
2166}
2167
2168bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2169{
2170	return false;
2171}
2172
2173bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2174					struct pt_regs *regs)
2175{
2176	return true;
2177}
2178
2179/*
2180 * Run handler and ask thread to singlestep.
2181 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2182 */
2183static void handle_swbp(struct pt_regs *regs)
2184{
2185	struct uprobe *uprobe;
2186	unsigned long bp_vaddr;
2187	int is_swbp;
2188
2189	bp_vaddr = uprobe_get_swbp_addr(regs);
2190	if (bp_vaddr == get_trampoline_vaddr())
2191		return handle_trampoline(regs);
2192
2193	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
2194	if (!uprobe) {
2195		if (is_swbp > 0) {
2196			/* No matching uprobe; signal SIGTRAP. */
2197			force_sig(SIGTRAP);
2198		} else {
2199			/*
2200			 * Either we raced with uprobe_unregister() or we can't
2201			 * access this memory. The latter is only possible if
2202			 * another thread plays with our ->mm. In both cases
2203			 * we can simply restart. If this vma was unmapped we
2204			 * can pretend this insn was not executed yet and get
2205			 * the (correct) SIGSEGV after restart.
2206			 */
2207			instruction_pointer_set(regs, bp_vaddr);
2208		}
2209		return;
2210	}
2211
2212	/* change it in advance for ->handler() and restart */
2213	instruction_pointer_set(regs, bp_vaddr);
2214
2215	/*
2216	 * TODO: move copy_insn/etc into _register and remove this hack.
2217	 * After we hit the bp, _unregister + _register can install the
2218	 * new and not-yet-analyzed uprobe at the same address, restart.
2219	 */
2220	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2221		goto out;
2222
2223	/*
2224	 * Pairs with the smp_wmb() in prepare_uprobe().
2225	 *
2226	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2227	 * we must also see the stores to &uprobe->arch performed by the
2228	 * prepare_uprobe() call.
2229	 */
2230	smp_rmb();
2231
2232	/* Tracing handlers use ->utask to communicate with fetch methods */
2233	if (!get_utask())
2234		goto out;
2235
2236	if (arch_uprobe_ignore(&uprobe->arch, regs))
2237		goto out;
2238
2239	handler_chain(uprobe, regs);
2240
2241	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2242		goto out;
2243
2244	if (!pre_ssout(uprobe, regs, bp_vaddr))
2245		return;
2246
2247	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
2248out:
2249	put_uprobe(uprobe);
2250}
2251
2252/*
2253 * Perform required fix-ups and disable singlestep.
2254 * Allow pending signals to take effect.
2255 */
2256static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2257{
2258	struct uprobe *uprobe;
2259	int err = 0;
2260
2261	uprobe = utask->active_uprobe;
2262	if (utask->state == UTASK_SSTEP_ACK)
2263		err = arch_uprobe_post_xol(&uprobe->arch, regs);
2264	else if (utask->state == UTASK_SSTEP_TRAPPED)
2265		arch_uprobe_abort_xol(&uprobe->arch, regs);
2266	else
2267		WARN_ON_ONCE(1);
2268
2269	put_uprobe(uprobe);
2270	utask->active_uprobe = NULL;
2271	utask->state = UTASK_RUNNING;
2272	xol_free_insn_slot(current);
2273
2274	spin_lock_irq(&current->sighand->siglock);
2275	recalc_sigpending(); /* see uprobe_deny_signal() */
2276	spin_unlock_irq(&current->sighand->siglock);
2277
2278	if (unlikely(err)) {
2279		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2280		force_sig(SIGILL);
2281	}
2282}
2283
2284/*
2285 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
2286 * allows the thread to return from interrupt. After that handle_swbp()
2287 * sets utask->active_uprobe.
2288 *
2289 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
2290 * and allows the thread to return from interrupt.
2291 *
2292 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
2293 * uprobe_notify_resume().
2294 */
2295void uprobe_notify_resume(struct pt_regs *regs)
2296{
2297	struct uprobe_task *utask;
2298
2299	clear_thread_flag(TIF_UPROBE);
2300
2301	utask = current->utask;
2302	if (utask && utask->active_uprobe)
2303		handle_singlestep(utask, regs);
2304	else
2305		handle_swbp(regs);
2306}
2307
2308/*
2309 * uprobe_pre_sstep_notifier gets called from interrupt context as part of the
2310 * notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
2311 */
2312int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2313{
2314	if (!current->mm)
2315		return 0;
2316
2317	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2318	    (!current->utask || !current->utask->return_instances))
2319		return 0;
2320
2321	set_thread_flag(TIF_UPROBE);
2322	return 1;
2323}
2324
2325/*
2326 * uprobe_post_sstep_notifier gets called in interrupt context as part of the
2327 * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
2328 */
2329int uprobe_post_sstep_notifier(struct pt_regs *regs)
2330{
2331	struct uprobe_task *utask = current->utask;
2332
2333	if (!current->mm || !utask || !utask->active_uprobe)
2334		/* task is currently not uprobed */
2335		return 0;
2336
2337	utask->state = UTASK_SSTEP_ACK;
2338	set_thread_flag(TIF_UPROBE);
2339	return 1;
2340}
2341
2342static struct notifier_block uprobe_exception_nb = {
2343	.notifier_call		= arch_uprobe_exception_notify,
2344	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2345};
2346
2347void __init uprobes_init(void)
2348{
2349	int i;
2350
2351	for (i = 0; i < UPROBES_HASH_SZ; i++)
2352		mutex_init(&uprobes_mmap_mutex[i]);
2353
2354	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2355}