v3.5.6
 
   1/*
   2 * User-space Probes (UProbes)
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17 *
  18 * Copyright (C) IBM Corporation, 2008-2012
  19 * Authors:
  20 *	Srikar Dronamraju
  21 *	Jim Keniston
  22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  23 */
  24
  25#include <linux/kernel.h>
  26#include <linux/highmem.h>
  27#include <linux/pagemap.h>	/* read_mapping_page */
  28#include <linux/slab.h>
  29#include <linux/sched.h>
  30#include <linux/rmap.h>		/* anon_vma_prepare */
  31#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
  32#include <linux/swap.h>		/* try_to_free_swap */
  33#include <linux/ptrace.h>	/* user_enable_single_step */
  34#include <linux/kdebug.h>	/* notifier mechanism */
  35
  36#include <linux/uprobes.h>
  37
  38#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
  39#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
  40
  41static struct srcu_struct uprobes_srcu;
  42static struct rb_root uprobes_tree = RB_ROOT;
  43
  44static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
  45
  46#define UPROBES_HASH_SZ	13
  47
  48/* serialize (un)register */
  49static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
  50
  51#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
  52
  53/* serialize uprobe->pending_list */
  54static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  55#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
  56
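The two hash macros above pick one of the UPROBES_HASH_SZ mutexes by reducing a
kernel pointer (an inode, for both users here) modulo 13. A minimal userspace
sketch of that bucketing, with made-up inode addresses (the bucket_of() helper
and the values are illustrative only, not kernel code):

	#include <stdio.h>

	#define UPROBES_HASH_SZ 13

	/* stand-in for uprobes_hash()/uprobes_mmap_hash(): pointer mod buckets */
	static unsigned int bucket_of(const void *inode)
	{
		return (unsigned long)inode % UPROBES_HASH_SZ;
	}

	int main(void)
	{
		/* hypothetical inode addresses; real ones come from the VFS */
		printf("inode a -> mutex %u\n", bucket_of((void *)0xffff8800aabbc000UL));
		printf("inode b -> mutex %u\n", bucket_of((void *)0xffff8800aabbc100UL));
		return 0;
	}

Because 13 is odd, the power-of-two alignment of inode allocations does not
collapse the distribution into a few buckets.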
  57/*
  58 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
   59 * events active at this time.  A fine-grained per-inode count would
   60 * probably be better.
  61 */
  62static atomic_t uprobe_events = ATOMIC_INIT(0);
  63
  64/*
   65 * Maintain a temporary per-vma info structure that can be used to check
   66 * whether a vma has already been handled. This structure exists because
   67 * extending vm_area_struct wasn't recommended.
  68 */
  69struct vma_info {
  70	struct list_head	probe_list;
  71	struct mm_struct	*mm;
  72	loff_t			vaddr;
  73};
  74
  75struct uprobe {
  76	struct rb_node		rb_node;	/* node in the rb tree */
  77	atomic_t		ref;
  78	struct rw_semaphore	consumer_rwsem;
  79	struct list_head	pending_list;
  80	struct uprobe_consumer	*consumers;
  81	struct inode		*inode;		/* Also hold a ref to inode */
  82	loff_t			offset;
  83	int			flags;
  84	struct arch_uprobe	arch;
  85};
  86
  87/*
  88 * valid_vma: Verify if the specified vma is an executable vma
  89 * Relax restrictions while unregistering: vm_flags might have
  90 * changed after breakpoint was inserted.
  91 *	- is_register: indicates if we are in register context.
  92 *	- Return 1 if the specified virtual address is in an
  93 *	  executable vma.
  94 */
  95static bool valid_vma(struct vm_area_struct *vma, bool is_register)
  96{
  97	if (!vma->vm_file)
  98		return false;
  99
 100	if (!is_register)
 101		return true;
 102
 103	if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
 104		return true;
 105
 106	return false;
 107}
 108
 109static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
 110{
 111	loff_t vaddr;
 112
 113	vaddr = vma->vm_start + offset;
 114	vaddr -= vma->vm_pgoff << PAGE_SHIFT;
 115
 116	return vaddr;
 117}
 118
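vma_address() is the file-offset-to-user-address translation used throughout
this file: start from where the mapping begins (vm_start), add the file offset,
then subtract the part of the file that sits before the mapping (vm_pgoff
pages). A standalone sketch of the same arithmetic with made-up numbers
(assuming 4 KiB pages; not kernel code):

	#include <assert.h>

	#define PAGE_SHIFT 12

	/* userspace re-statement of vma_address(): file offset -> user address */
	static long long vma_addr(unsigned long vm_start, unsigned long vm_pgoff,
				  long long offset)
	{
		return (long long)vm_start + offset - ((long long)vm_pgoff << PAGE_SHIFT);
	}

	int main(void)
	{
		/* hypothetical text mapping: file page 2 onward mapped at 0x400000, */
		/* so a probe at file offset 0x2345 is 0x345 bytes into the mapping  */
		assert(vma_addr(0x400000, 2, 0x2345) == 0x400345);
		return 0;
	}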
 119/**
 120 * __replace_page - replace page in vma by new page.
 121 * based on replace_page in mm/ksm.c
 122 *
 123 * @vma:      vma that holds the pte pointing to page
  124 * @page:     the COWed page we are replacing by kpage
 125 * @kpage:    the modified page we replace page by
 126 *
 127 * Returns 0 on success, -EFAULT on failure.
 128 */
 129static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
 130{
 131	struct mm_struct *mm = vma->vm_mm;
 132	pgd_t *pgd;
 133	pud_t *pud;
 134	pmd_t *pmd;
 135	pte_t *ptep;
 136	spinlock_t *ptl;
 137	unsigned long addr;
 138	int err = -EFAULT;
 139
 140	addr = page_address_in_vma(page, vma);
 141	if (addr == -EFAULT)
 142		goto out;
 143
 144	pgd = pgd_offset(mm, addr);
 145	if (!pgd_present(*pgd))
 146		goto out;
 147
 148	pud = pud_offset(pgd, addr);
 149	if (!pud_present(*pud))
 150		goto out;
 151
 152	pmd = pmd_offset(pud, addr);
 153	if (!pmd_present(*pmd))
 154		goto out;
 155
 156	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
 157	if (!ptep)
 158		goto out;
 159
 160	get_page(kpage);
 161	page_add_new_anon_rmap(kpage, vma, addr);
 162
 163	if (!PageAnon(page)) {
 164		dec_mm_counter(mm, MM_FILEPAGES);
 165		inc_mm_counter(mm, MM_ANONPAGES);
 166	}
 167
 168	flush_cache_page(vma, addr, pte_pfn(*ptep));
 169	ptep_clear_flush(vma, addr, ptep);
 170	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 171
 172	page_remove_rmap(page);
 173	if (!page_mapped(page))
 174		try_to_free_swap(page);
 175	put_page(page);
 176	pte_unmap_unlock(ptep, ptl);
 177	err = 0;
 178
 179out:
 180	return err;
 181}
 182
 183/**
 184 * is_swbp_insn - check if instruction is breakpoint instruction.
 185 * @insn: instruction to be checked.
 186 * Default implementation of is_swbp_insn
 187 * Returns true if @insn is a breakpoint instruction.
 188 */
 189bool __weak is_swbp_insn(uprobe_opcode_t *insn)
 190{
 191	return *insn == UPROBE_SWBP_INSN;
 192}
 193
 194/*
 195 * NOTE:
 196 * Expect the breakpoint instruction to be the smallest size instruction for
 197 * the architecture. If an arch has variable length instruction and the
 198 * breakpoint instruction is not of the smallest length instruction
 199 * supported by that architecture then we need to modify read_opcode /
 200 * write_opcode accordingly. This would never be a problem for archs that
 201 * have fixed length instructions.
 202 */
 203
 204/*
 205 * write_opcode - write the opcode at a given virtual address.
 206 * @auprobe: arch breakpointing information.
 207 * @mm: the probed process address space.
 208 * @vaddr: the virtual address to store the opcode.
 209 * @opcode: opcode to be written at @vaddr.
 210 *
 211 * Called with mm->mmap_sem held (for read and with a reference to
 212 * mm).
 213 *
 214 * For mm @mm, write the opcode at @vaddr.
 215 * Return 0 (success) or a negative errno.
 216 */
 217static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 218			unsigned long vaddr, uprobe_opcode_t opcode)
 219{
 220	struct page *old_page, *new_page;
 221	struct address_space *mapping;
 222	void *vaddr_old, *vaddr_new;
 223	struct vm_area_struct *vma;
 224	struct uprobe *uprobe;
 225	loff_t addr;
 226	int ret;
 227
 228	/* Read the page with vaddr into memory */
 229	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
 230	if (ret <= 0)
 231		return ret;
 232
 233	ret = -EINVAL;
 234
 235	/*
 236	 * We are interested in text pages only. Our pages of interest
 237	 * should be mapped for read and execute only. We desist from
 238	 * adding probes in write mapped pages since the breakpoints
 239	 * might end up in the file copy.
 240	 */
 241	if (!valid_vma(vma, is_swbp_insn(&opcode)))
 242		goto put_out;
 243
 244	uprobe = container_of(auprobe, struct uprobe, arch);
 245	mapping = uprobe->inode->i_mapping;
 246	if (mapping != vma->vm_file->f_mapping)
 247		goto put_out;
 248
 249	addr = vma_address(vma, uprobe->offset);
 250	if (vaddr != (unsigned long)addr)
 251		goto put_out;
 252
 253	ret = -ENOMEM;
 254	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 255	if (!new_page)
 256		goto put_out;
 257
 258	__SetPageUptodate(new_page);
 259
 260	/*
 261	 * lock page will serialize against do_wp_page()'s
 262	 * PageAnon() handling
 263	 */
 264	lock_page(old_page);
 265	/* copy the page now that we've got it stable */
 266	vaddr_old = kmap_atomic(old_page);
 267	vaddr_new = kmap_atomic(new_page);
 268
 269	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
 270
 271	/* poke the new insn in, ASSUMES we don't cross page boundary */
 272	vaddr &= ~PAGE_MASK;
 273	BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
 274	memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 275
 276	kunmap_atomic(vaddr_new);
 277	kunmap_atomic(vaddr_old);
 278
 279	ret = anon_vma_prepare(vma);
 280	if (ret)
 281		goto unlock_out;
 282
 283	lock_page(new_page);
 284	ret = __replace_page(vma, old_page, new_page);
 285	unlock_page(new_page);
 286
 287unlock_out:
 288	unlock_page(old_page);
 289	page_cache_release(new_page);
 290
 291put_out:
 292	put_page(old_page);
 293
 294	return ret;
 295}
 296
 297/**
 298 * read_opcode - read the opcode at a given virtual address.
 299 * @mm: the probed process address space.
 300 * @vaddr: the virtual address to read the opcode.
 301 * @opcode: location to store the read opcode.
 302 *
 303 * Called with mm->mmap_sem held (for read and with a reference to
  304 * mm).
 305 *
 306 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 307 * Return 0 (success) or a negative errno.
 308 */
 309static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
 310{
 311	struct page *page;
 312	void *vaddr_new;
 313	int ret;
 314
 315	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
 316	if (ret <= 0)
 317		return ret;
 318
 319	lock_page(page);
 320	vaddr_new = kmap_atomic(page);
 321	vaddr &= ~PAGE_MASK;
 322	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
 323	kunmap_atomic(vaddr_new);
 324	unlock_page(page);
 325
 326	put_page(page);
 327
 328	return 0;
 329}
 330
 331static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
 332{
 333	uprobe_opcode_t opcode;
 334	int result;
 335
 336	result = read_opcode(mm, vaddr, &opcode);
 337	if (result)
 338		return result;
 339
 340	if (is_swbp_insn(&opcode))
 341		return 1;
 342
 343	return 0;
 344}
 345
 346/**
 347 * set_swbp - store breakpoint at a given address.
 348 * @auprobe: arch specific probepoint information.
 349 * @mm: the probed process address space.
 350 * @vaddr: the virtual address to insert the opcode.
 351 *
 352 * For mm @mm, store the breakpoint instruction at @vaddr.
 353 * Return 0 (success) or a negative errno.
 354 */
 355int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 356{
 357	int result;
 358
 359	result = is_swbp_at_addr(mm, vaddr);
 360	if (result == 1)
 361		return -EEXIST;
 362
 363	if (result)
 364		return result;
 365
 366	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
 367}
 368
 369/**
 370 * set_orig_insn - Restore the original instruction.
 371 * @mm: the probed process address space.
 372 * @auprobe: arch specific probepoint information.
 373 * @vaddr: the virtual address to insert the opcode.
  374 * @verify: if true, verify existence of breakpoint instruction.
 375 *
 376 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 377 * Return 0 (success) or a negative errno.
 378 */
 379int __weak
 380set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
 381{
 382	if (verify) {
 383		int result;
 384
 385		result = is_swbp_at_addr(mm, vaddr);
 386		if (!result)
 387			return -EINVAL;
 388
 389		if (result != 1)
 390			return result;
 391	}
 392	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 393}
 394
 395static int match_uprobe(struct uprobe *l, struct uprobe *r)
 396{
 397	if (l->inode < r->inode)
 398		return -1;
 399
 400	if (l->inode > r->inode)
 401		return 1;
 402
 403	if (l->offset < r->offset)
 404		return -1;
 405
 406	if (l->offset > r->offset)
 407		return 1;
 408
 409	return 0;
 410}
 411
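match_uprobe() defines the total order the rbtree is keyed on: compare inodes
first, offsets second. Both __find_uprobe() and __insert_uprobe() below walk
left on a negative result and right on a positive one. A standalone sketch of
the ordering, with the struct reduced to its two key fields (illustrative
values only):

	#include <assert.h>

	struct key { unsigned long inode; long long offset; };

	/* same ordering as match_uprobe(): inode first, then offset */
	static int match(const struct key *l, const struct key *r)
	{
		if (l->inode != r->inode)
			return l->inode < r->inode ? -1 : 1;
		if (l->offset != r->offset)
			return l->offset < r->offset ? -1 : 1;
		return 0;
	}

	int main(void)
	{
		struct key a = { 1, 0x100 }, b = { 1, 0x200 }, c = { 2, 0x0 };

		assert(match(&a, &b) < 0);	/* same inode: smaller offset first */
		assert(match(&b, &c) < 0);	/* inode dominates the offset */
		assert(match(&a, &a) == 0);	/* equal keys: found */
		return 0;
	}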
 412static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 413{
 414	struct uprobe u = { .inode = inode, .offset = offset };
 415	struct rb_node *n = uprobes_tree.rb_node;
 416	struct uprobe *uprobe;
 417	int match;
 418
 419	while (n) {
 420		uprobe = rb_entry(n, struct uprobe, rb_node);
 421		match = match_uprobe(&u, uprobe);
 422		if (!match) {
 423			atomic_inc(&uprobe->ref);
 424			return uprobe;
 425		}
 426
 427		if (match < 0)
 428			n = n->rb_left;
 429		else
 430			n = n->rb_right;
 431	}
 432	return NULL;
 433}
 434
 435/*
 436 * Find a uprobe corresponding to a given inode:offset
 437 * Acquires uprobes_treelock
 438 */
 439static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 440{
 441	struct uprobe *uprobe;
 442	unsigned long flags;
 443
 444	spin_lock_irqsave(&uprobes_treelock, flags);
 445	uprobe = __find_uprobe(inode, offset);
 446	spin_unlock_irqrestore(&uprobes_treelock, flags);
 447
 448	return uprobe;
 449}
 450
 451static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
 452{
 453	struct rb_node **p = &uprobes_tree.rb_node;
 454	struct rb_node *parent = NULL;
 455	struct uprobe *u;
 456	int match;
 457
 458	while (*p) {
 459		parent = *p;
 460		u = rb_entry(parent, struct uprobe, rb_node);
 461		match = match_uprobe(uprobe, u);
 462		if (!match) {
 463			atomic_inc(&u->ref);
 464			return u;
 465		}
 466
 467		if (match < 0)
 468			p = &parent->rb_left;
 469		else
 470			p = &parent->rb_right;
 471
 472	}
 473
 474	u = NULL;
 475	rb_link_node(&uprobe->rb_node, parent, p);
 476	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
 477	/* get access + creation ref */
 478	atomic_set(&uprobe->ref, 2);
 479
 480	return u;
 481}
 482
 483/*
 484 * Acquire uprobes_treelock.
 485 * Matching uprobe already exists in rbtree;
 486 *	increment (access refcount) and return the matching uprobe.
 487 *
 488 * No matching uprobe; insert the uprobe in rb_tree;
 489 *	get a double refcount (access + creation) and return NULL.
 490 */
 491static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 492{
 493	unsigned long flags;
 494	struct uprobe *u;
 495
 496	spin_lock_irqsave(&uprobes_treelock, flags);
 497	u = __insert_uprobe(uprobe);
 498	spin_unlock_irqrestore(&uprobes_treelock, flags);
 499
 500	/* For now assume that the instruction need not be single-stepped */
 501	uprobe->flags |= UPROBE_SKIP_SSTEP;
 502
 503	return u;
 504}
 505
 506static void put_uprobe(struct uprobe *uprobe)
 507{
 508	if (atomic_dec_and_test(&uprobe->ref))
 509		kfree(uprobe);
 510}
 511
 512static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 513{
 514	struct uprobe *uprobe, *cur_uprobe;
 515
 516	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
 517	if (!uprobe)
 518		return NULL;
 519
 520	uprobe->inode = igrab(inode);
 521	uprobe->offset = offset;
 522	init_rwsem(&uprobe->consumer_rwsem);
 523	INIT_LIST_HEAD(&uprobe->pending_list);
 524
 525	/* add to uprobes_tree, sorted on inode:offset */
 526	cur_uprobe = insert_uprobe(uprobe);
 527
 528	/* a uprobe exists for this inode:offset combination */
 529	if (cur_uprobe) {
 530		kfree(uprobe);
 531		uprobe = cur_uprobe;
 532		iput(inode);
 533	} else {
 534		atomic_inc(&uprobe_events);
 535	}
 536
 537	return uprobe;
 538}
 539
 540static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 541{
 542	struct uprobe_consumer *uc;
 543
 544	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
 545		return;
 546
 547	down_read(&uprobe->consumer_rwsem);
 548	for (uc = uprobe->consumers; uc; uc = uc->next) {
 549		if (!uc->filter || uc->filter(uc, current))
 550			uc->handler(uc, regs);
 551	}
 552	up_read(&uprobe->consumer_rwsem);
 553}
 554
 555/* Returns the previous consumer */
 556static struct uprobe_consumer *
 557consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 558{
 559	down_write(&uprobe->consumer_rwsem);
 560	uc->next = uprobe->consumers;
 561	uprobe->consumers = uc;
 562	up_write(&uprobe->consumer_rwsem);
 563
 564	return uc->next;
 565}
 566
 567/*
 568 * For uprobe @uprobe, delete the consumer @uc.
 569 * Return true if the @uc is deleted successfully
 570 * or return false.
 571 */
 572static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 573{
 574	struct uprobe_consumer **con;
 575	bool ret = false;
 576
 577	down_write(&uprobe->consumer_rwsem);
 578	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
 579		if (*con == uc) {
 580			*con = uc->next;
 581			ret = true;
 582			break;
 583		}
 584	}
 585	up_write(&uprobe->consumer_rwsem);
 586
 587	return ret;
 588}
 589
 590static int
 591__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
 592			unsigned long nbytes, unsigned long offset)
 593{
 594	struct file *filp = vma->vm_file;
 595	struct page *page;
 596	void *vaddr;
 597	unsigned long off1;
 598	unsigned long idx;
 599
 600	if (!filp)
 601		return -EINVAL;
 602
 603	idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
 604	off1 = offset &= ~PAGE_MASK;
 605
 606	/*
 607	 * Ensure that the page that has the original instruction is
 608	 * populated and in page-cache.
 609	 */
 610	page = read_mapping_page(mapping, idx, filp);
 611	if (IS_ERR(page))
 612		return PTR_ERR(page);
 613
 614	vaddr = kmap_atomic(page);
 615	memcpy(insn, vaddr + off1, nbytes);
 616	kunmap_atomic(vaddr);
 617	page_cache_release(page);
 618
 619	return 0;
 620}
 621
 622static int
 623copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 624{
 625	struct address_space *mapping;
 626	unsigned long nbytes;
 627	int bytes;
 628
 629	addr &= ~PAGE_MASK;
 630	nbytes = PAGE_SIZE - addr;
 631	mapping = uprobe->inode->i_mapping;
 632
 633	/* Instruction at end of binary; copy only available bytes */
 634	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
 635		bytes = uprobe->inode->i_size - uprobe->offset;
 636	else
 637		bytes = MAX_UINSN_BYTES;
 638
 639	/* Instruction at the page-boundary; copy bytes in second page */
 640	if (nbytes < bytes) {
 641		if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
 642				bytes - nbytes, uprobe->offset + nbytes))
 643			return -ENOMEM;
 644
 645		bytes = nbytes;
 646	}
 647	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
 648}
 649
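copy_insn() must cope with a probed instruction whose MAX_UINSN_BYTES window
crosses a page boundary (or runs past end-of-file): the first __copy_insn()
call takes what the first page holds, the second fetches the remainder from
the next page. A standalone sketch of just the split arithmetic (the 4 KiB
page and 16-byte maximum instruction are assumptions for the example):

	#include <assert.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define MAX_UINSN_BYTES	16

	int main(void)
	{
		long long uprobe_offset = 0x1ff8;	/* 8 bytes before a page boundary */
		unsigned long nbytes = PAGE_SIZE - (uprobe_offset & ~PAGE_MASK);
		unsigned long bytes = MAX_UINSN_BYTES;	/* file is long enough here */

		/* first page supplies 8 bytes, the second page the remaining 8 */
		assert(nbytes == 8);
		assert(nbytes < bytes && bytes - nbytes == 8);
		return 0;
	}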
 650/*
 651 * How mm->uprobes_state.count gets updated
 652 * uprobe_mmap() increments the count if
 653 * 	- it successfully adds a breakpoint.
  654 * 	- it cannot add a breakpoint, but sees that there is an underlying
  655 * 	  breakpoint (via is_swbp_at_addr()).
  656 *
  657 * uprobe_munmap() decrements the count if
  658 * 	- it sees an underlying breakpoint (via is_swbp_at_addr).
  659 * 	  (A subsequent uprobe_unregister wouldn't find the breakpoint
 660 * 	  unless a uprobe_mmap kicks in, since the old vma would be
 661 * 	  dropped just after uprobe_munmap.)
 662 *
 663 * uprobe_register increments the count if:
 664 * 	- it successfully adds a breakpoint.
 665 *
 666 * uprobe_unregister decrements the count if:
  667 * 	- it sees an underlying breakpoint and removes it successfully
  668 * 	  (via is_swbp_at_addr).
  669 * 	  (A subsequent uprobe_munmap wouldn't find the breakpoint
 670 * 	  since there is no underlying breakpoint after the
 671 * 	  breakpoint removal.)
 672 */
 673static int
 674install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 675			struct vm_area_struct *vma, loff_t vaddr)
 676{
 677	unsigned long addr;
 678	int ret;
 679
 680	/*
 681	 * If probe is being deleted, unregister thread could be done with
 682	 * the vma-rmap-walk through. Adding a probe now can be fatal since
 683	 * nobody will be able to cleanup. Also we could be from fork or
 684	 * mremap path, where the probe might have already been inserted.
 685	 * Hence behave as if probe already existed.
 686	 */
 687	if (!uprobe->consumers)
 688		return -EEXIST;
 689
 690	addr = (unsigned long)vaddr;
 691
 692	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
 693		ret = copy_insn(uprobe, vma, addr);
 694		if (ret)
 695			return ret;
 696
 697		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
 698			return -EEXIST;
 699
 700		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
 701		if (ret)
 702			return ret;
 703
 704		uprobe->flags |= UPROBE_COPY_INSN;
 705	}
 706
 707	/*
 708	 * Ideally, should be updating the probe count after the breakpoint
 709	 * has been successfully inserted. However a thread could hit the
 710	 * breakpoint we just inserted even before the probe count is
 711	 * incremented. If this is the first breakpoint placed, breakpoint
 712	 * notifier might ignore uprobes and pass the trap to the thread.
 713	 * Hence increment before and decrement on failure.
 714	 */
 715	atomic_inc(&mm->uprobes_state.count);
 716	ret = set_swbp(&uprobe->arch, mm, addr);
 717	if (ret)
 718		atomic_dec(&mm->uprobes_state.count);
 719
 720	return ret;
 721}
 722
 723static void
 724remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
 725{
 726	if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
 727		atomic_dec(&mm->uprobes_state.count);
 728}
 729
 730/*
  731 * There could be threads that have hit the breakpoint and are entering the
  732 * notifier code, trying to acquire the uprobes_treelock. The thread
  733 * calling delete_uprobe() to remove the uprobe from the rb_tree can
  734 * race with these threads and might acquire the uprobes_treelock before
  735 * some of the breakpoint-hit threads do; in that case those threads
  736 * would not find the uprobe. So the unregistering thread waits until
  737 * all breakpoint-hit threads are accounted for before acquiring the
  738 * uprobes_treelock and removing the uprobe from the rbtree.
 739 */
 740static void delete_uprobe(struct uprobe *uprobe)
 741{
 742	unsigned long flags;
 743
 744	synchronize_srcu(&uprobes_srcu);
 745	spin_lock_irqsave(&uprobes_treelock, flags);
 746	rb_erase(&uprobe->rb_node, &uprobes_tree);
 747	spin_unlock_irqrestore(&uprobes_treelock, flags);
 748	iput(uprobe->inode);
 749	put_uprobe(uprobe);
 750	atomic_dec(&uprobe_events);
 751}
 752
 753static struct vma_info *
 754__find_next_vma_info(struct address_space *mapping, struct list_head *head,
 755			struct vma_info *vi, loff_t offset, bool is_register)
 756{
 757	struct prio_tree_iter iter;
 758	struct vm_area_struct *vma;
 759	struct vma_info *tmpvi;
 760	unsigned long pgoff;
 761	int existing_vma;
 762	loff_t vaddr;
 763
 764	pgoff = offset >> PAGE_SHIFT;
 765
 766	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 767		if (!valid_vma(vma, is_register))
 768			continue;
 769
 770		existing_vma = 0;
 771		vaddr = vma_address(vma, offset);
 772
 773		list_for_each_entry(tmpvi, head, probe_list) {
 774			if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
 775				existing_vma = 1;
 776				break;
 777			}
 778		}
 779
 780		/*
 781		 * Another vma needs a probe to be installed. However skip
 782		 * installing the probe if the vma is about to be unlinked.
 783		 */
 784		if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
 785			vi->mm = vma->vm_mm;
 786			vi->vaddr = vaddr;
 787			list_add(&vi->probe_list, head);
 788
 789			return vi;
 790		}
 791	}
 792
 793	return NULL;
 794}
 795
 796/*
 797 * Iterate in the rmap prio tree  and find a vma where a probe has not
 798 * yet been inserted.
 799 */
 800static struct vma_info *
 801find_next_vma_info(struct address_space *mapping, struct list_head *head,
 802		loff_t offset, bool is_register)
 803{
 804	struct vma_info *vi, *retvi;
 805
 806	vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
 807	if (!vi)
 808		return ERR_PTR(-ENOMEM);
 809
 810	mutex_lock(&mapping->i_mmap_mutex);
 811	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
 812	mutex_unlock(&mapping->i_mmap_mutex);
 813
 814	if (!retvi)
 815		kfree(vi);
 816
 817	return retvi;
 818}
 819
 820static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 821{
 822	struct list_head try_list;
 823	struct vm_area_struct *vma;
 824	struct address_space *mapping;
 825	struct vma_info *vi, *tmpvi;
 826	struct mm_struct *mm;
 827	loff_t vaddr;
 828	int ret;
 829
 830	mapping = uprobe->inode->i_mapping;
 831	INIT_LIST_HEAD(&try_list);
 832
 833	ret = 0;
 834
 835	for (;;) {
 836		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
 837		if (!vi)
 838			break;
 839
 840		if (IS_ERR(vi)) {
 841			ret = PTR_ERR(vi);
 842			break;
 843		}
 844
 845		mm = vi->mm;
 846		down_read(&mm->mmap_sem);
 847		vma = find_vma(mm, (unsigned long)vi->vaddr);
 848		if (!vma || !valid_vma(vma, is_register)) {
 849			list_del(&vi->probe_list);
 850			kfree(vi);
 851			up_read(&mm->mmap_sem);
 852			mmput(mm);
 853			continue;
 854		}
 855		vaddr = vma_address(vma, uprobe->offset);
 856		if (vma->vm_file->f_mapping->host != uprobe->inode ||
 857						vaddr != vi->vaddr) {
 858			list_del(&vi->probe_list);
 859			kfree(vi);
 860			up_read(&mm->mmap_sem);
 861			mmput(mm);
 862			continue;
 863		}
 864
 865		if (is_register)
 866			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
 867		else
 868			remove_breakpoint(uprobe, mm, vi->vaddr);
 869
 870		up_read(&mm->mmap_sem);
 871		mmput(mm);
 872		if (is_register) {
 873			if (ret && ret == -EEXIST)
 874				ret = 0;
 875			if (ret)
 876				break;
 877		}
 878	}
 879
 880	list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
 881		list_del(&vi->probe_list);
 882		kfree(vi);
 883	}
 884
 885	return ret;
 886}
 887
 888static int __uprobe_register(struct uprobe *uprobe)
 889{
 890	return register_for_each_vma(uprobe, true);
 891}
 892
 893static void __uprobe_unregister(struct uprobe *uprobe)
 894{
 895	if (!register_for_each_vma(uprobe, false))
 896		delete_uprobe(uprobe);
 897
  898	/* TODO: can't unregister? schedule a worker thread */
 899}
 900
 901/*
 902 * uprobe_register - register a probe
 903 * @inode: the file in which the probe has to be placed.
 904 * @offset: offset from the start of the file.
  905 * @uc: information on how to handle the probe.
 906 *
 907 * Apart from the access refcount, uprobe_register() takes a creation
  908 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 909 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 910 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 911 * @uprobe even before the register operation is complete. Creation
 912 * refcount is released when the last @uc for the @uprobe
 913 * unregisters.
 914 *
  915 * Return errno if it cannot successfully install probes,
  916 * else return 0 (success).
 917 */
 918int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 919{
 920	struct uprobe *uprobe;
 921	int ret;
 922
 923	if (!inode || !uc || uc->next)
 924		return -EINVAL;
 925
 926	if (offset > i_size_read(inode))
 927		return -EINVAL;
 928
 929	ret = 0;
 930	mutex_lock(uprobes_hash(inode));
 931	uprobe = alloc_uprobe(inode, offset);
 932
 933	if (uprobe && !consumer_add(uprobe, uc)) {
 934		ret = __uprobe_register(uprobe);
 935		if (ret) {
 936			uprobe->consumers = NULL;
 937			__uprobe_unregister(uprobe);
 938		} else {
 939			uprobe->flags |= UPROBE_RUN_HANDLER;
 940		}
 941	}
 942
 943	mutex_unlock(uprobes_hash(inode));
 944	put_uprobe(uprobe);
 945
 946	return ret;
 947}
 948
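As a usage sketch: a client keeps one uprobe_consumer per probe and hands
uprobe_register() an inode plus a file offset (not a virtual address), then
undoes it with uprobe_unregister(), documented just below. A minimal
module-style sketch follows; the target path and offset are hypothetical,
error handling is trimmed, and note that in v3.5.6 these symbols are not
exported to modules, so the real callers are in-tree users such as the
uprobe tracer:

	#include <linux/module.h>
	#include <linux/namei.h>
	#include <linux/fs.h>
	#include <linux/sched.h>
	#include <linux/uprobes.h>

	/* hypothetical target: the instruction at file offset 0x4710 of /bin/true */
	static const char *target = "/bin/true";
	static loff_t probe_offset = 0x4710;
	static struct inode *probe_inode;

	static int sample_handler(struct uprobe_consumer *self, struct pt_regs *regs)
	{
		pr_info("probe hit in %s (pid %d)\n", current->comm, current->pid);
		return 0;
	}

	static struct uprobe_consumer sample_consumer = {
		.handler = sample_handler,
		/* no .filter: every task that hits the breakpoint runs the handler */
	};

	static int __init sample_init(void)
	{
		struct path path;
		int ret = kern_path(target, LOOKUP_FOLLOW, &path);

		if (ret)
			return ret;
		probe_inode = igrab(path.dentry->d_inode);
		path_put(&path);

		return uprobe_register(probe_inode, probe_offset, &sample_consumer);
	}

	static void __exit sample_exit(void)
	{
		uprobe_unregister(probe_inode, probe_offset, &sample_consumer);
		iput(probe_inode);
	}

	module_init(sample_init);
	module_exit(sample_exit);
	MODULE_LICENSE("GPL");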
 949/*
  950 * uprobe_unregister - unregister an already registered probe.
 951 * @inode: the file in which the probe has to be removed.
 952 * @offset: offset from the start of the file.
 953 * @uc: identify which probe if multiple probes are colocated.
 954 */
 955void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 956{
 957	struct uprobe *uprobe;
 958
 959	if (!inode || !uc)
 960		return;
 961
 962	uprobe = find_uprobe(inode, offset);
 963	if (!uprobe)
 964		return;
 965
 966	mutex_lock(uprobes_hash(inode));
 967
 968	if (consumer_del(uprobe, uc)) {
 969		if (!uprobe->consumers) {
 970			__uprobe_unregister(uprobe);
 971			uprobe->flags &= ~UPROBE_RUN_HANDLER;
 972		}
 973	}
 974
 975	mutex_unlock(uprobes_hash(inode));
 976	if (uprobe)
 977		put_uprobe(uprobe);
 978}
 979
 980/*
 981 * Of all the nodes that correspond to the given inode, return the node
 982 * with the least offset.
 983 */
 984static struct rb_node *find_least_offset_node(struct inode *inode)
 985{
 986	struct uprobe u = { .inode = inode, .offset = 0};
 987	struct rb_node *n = uprobes_tree.rb_node;
 988	struct rb_node *close_node = NULL;
 989	struct uprobe *uprobe;
 990	int match;
 991
 992	while (n) {
 993		uprobe = rb_entry(n, struct uprobe, rb_node);
 994		match = match_uprobe(&u, uprobe);
 995
 996		if (uprobe->inode == inode)
 997			close_node = n;
 998
 999		if (!match)
1000			return close_node;
1001
1002		if (match < 0)
1003			n = n->rb_left;
1004		else
1005			n = n->rb_right;
1006	}
1007
1008	return close_node;
1009}
1010
1011/*
1012 * For a given inode, build a list of probes that need to be inserted.
1013 */
1014static void build_probe_list(struct inode *inode, struct list_head *head)
1015{
1016	struct uprobe *uprobe;
1017	unsigned long flags;
1018	struct rb_node *n;
1019
1020	spin_lock_irqsave(&uprobes_treelock, flags);
1021
1022	n = find_least_offset_node(inode);
1023
1024	for (; n; n = rb_next(n)) {
1025		uprobe = rb_entry(n, struct uprobe, rb_node);
1026		if (uprobe->inode != inode)
1027			break;
1028
1029		list_add(&uprobe->pending_list, head);
1030		atomic_inc(&uprobe->ref);
1031	}
1032
1033	spin_unlock_irqrestore(&uprobes_treelock, flags);
1034}
1035
1036/*
1037 * Called from mmap_region.
1038 * called with mm->mmap_sem acquired.
1039 *
 1040 * Return a negative errno if we fail to insert probes and we cannot
 1041 * bail out.
1042 * Return 0 otherwise. i.e:
1043 *
1044 *	- successful insertion of probes
1045 *	- (or) no possible probes to be inserted.
 1046 *	- (or) insertion of probes failed but we can bail out.
1047 */
1048int uprobe_mmap(struct vm_area_struct *vma)
1049{
1050	struct list_head tmp_list;
1051	struct uprobe *uprobe, *u;
1052	struct inode *inode;
1053	int ret, count;
1054
1055	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
1056		return 0;
1057
1058	inode = vma->vm_file->f_mapping->host;
1059	if (!inode)
1060		return 0;
1061
1062	INIT_LIST_HEAD(&tmp_list);
1063	mutex_lock(uprobes_mmap_hash(inode));
1064	build_probe_list(inode, &tmp_list);
1065
1066	ret = 0;
1067	count = 0;
1068
1069	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1070		loff_t vaddr;
1071
1072		list_del(&uprobe->pending_list);
1073		if (!ret) {
1074			vaddr = vma_address(vma, uprobe->offset);
1075
1076			if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
1077				put_uprobe(uprobe);
1078				continue;
1079			}
1080
1081			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1082
1083			/* Ignore double add: */
1084			if (ret == -EEXIST) {
1085				ret = 0;
1086
1087				if (!is_swbp_at_addr(vma->vm_mm, vaddr))
1088					continue;
1089
1090				/*
1091				 * Unable to insert a breakpoint, but
1092				 * breakpoint lies underneath. Increment the
1093				 * probe count.
1094				 */
1095				atomic_inc(&vma->vm_mm->uprobes_state.count);
1096			}
1097
1098			if (!ret)
1099				count++;
1100		}
1101		put_uprobe(uprobe);
1102	}
1103
1104	mutex_unlock(uprobes_mmap_hash(inode));
1105
1106	if (ret)
1107		atomic_sub(count, &vma->vm_mm->uprobes_state.count);
1108
1109	return ret;
1110}
1111
1112/*
1113 * Called in context of a munmap of a vma.
1114 */
1115void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1116{
1117	struct list_head tmp_list;
1118	struct uprobe *uprobe, *u;
1119	struct inode *inode;
1120
1121	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
1122		return;
1123
1124	if (!atomic_read(&vma->vm_mm->uprobes_state.count))
1125		return;
1126
1127	inode = vma->vm_file->f_mapping->host;
1128	if (!inode)
1129		return;
1130
1131	INIT_LIST_HEAD(&tmp_list);
1132	mutex_lock(uprobes_mmap_hash(inode));
1133	build_probe_list(inode, &tmp_list);
1134
1135	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1136		loff_t vaddr;
1137
1138		list_del(&uprobe->pending_list);
1139		vaddr = vma_address(vma, uprobe->offset);
1140
1141		if (vaddr >= start && vaddr < end) {
1142			/*
1143			 * An unregister could have removed the probe before
1144			 * unmap. So check before we decrement the count.
1145			 */
1146			if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
1147				atomic_dec(&vma->vm_mm->uprobes_state.count);
1148		}
1149		put_uprobe(uprobe);
1150	}
1151	mutex_unlock(uprobes_mmap_hash(inode));
1152}
1153
1154/* Slot allocation for XOL */
1155static int xol_add_vma(struct xol_area *area)
1156{
1157	struct mm_struct *mm;
1158	int ret;
1159
1160	area->page = alloc_page(GFP_HIGHUSER);
1161	if (!area->page)
1162		return -ENOMEM;
1163
1164	ret = -EALREADY;
1165	mm = current->mm;
1166
1167	down_write(&mm->mmap_sem);
1168	if (mm->uprobes_state.xol_area)
1169		goto fail;
1170
1171	ret = -ENOMEM;
1172
1173	/* Try to map as high as possible, this is only a hint. */
1174	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1175	if (area->vaddr & ~PAGE_MASK) {
1176		ret = area->vaddr;
1177		goto fail;
1178	}
1179
1180	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1181				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
1182	if (ret)
1183		goto fail;
1184
1185	smp_wmb();	/* pairs with get_xol_area() */
1186	mm->uprobes_state.xol_area = area;
1187	ret = 0;
1188
1189fail:
1190	up_write(&mm->mmap_sem);
1191	if (ret)
1192		__free_page(area->page);
1193
1194	return ret;
1195}
1196
1197static struct xol_area *get_xol_area(struct mm_struct *mm)
1198{
1199	struct xol_area *area;
1200
1201	area = mm->uprobes_state.xol_area;
1202	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
1203
1204	return area;
1205}
1206
1207/*
1208 * xol_alloc_area - Allocate process's xol_area.
1209 * This area will be used for storing instructions for execution out of
1210 * line.
1211 *
1212 * Returns the allocated area or NULL.
1213 */
1214static struct xol_area *xol_alloc_area(void)
1215{
1216	struct xol_area *area;
1217
1218	area = kzalloc(sizeof(*area), GFP_KERNEL);
1219	if (unlikely(!area))
1220		return NULL;
1221
1222	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1223
1224	if (!area->bitmap)
1225		goto fail;
1226
1227	init_waitqueue_head(&area->wq);
1228	if (!xol_add_vma(area))
1229		return area;
1230
1231fail:
1232	kfree(area->bitmap);
1233	kfree(area);
1234
1235	return get_xol_area(current->mm);
1236}
1237
1238/*
1239 * uprobe_clear_state - Free the area allocated for slots.
1240 */
1241void uprobe_clear_state(struct mm_struct *mm)
1242{
1243	struct xol_area *area = mm->uprobes_state.xol_area;
1244
1245	if (!area)
1246		return;
1247
1248	put_page(area->page);
1249	kfree(area->bitmap);
1250	kfree(area);
1251}
1252
1253/*
1254 * uprobe_reset_state - Free the area allocated for slots.
1255 */
1256void uprobe_reset_state(struct mm_struct *mm)
1257{
1258	mm->uprobes_state.xol_area = NULL;
1259	atomic_set(&mm->uprobes_state.count, 0);
1260}
1261
1262/*
1263 *  - search for a free slot.
1264 */
1265static unsigned long xol_take_insn_slot(struct xol_area *area)
1266{
1267	unsigned long slot_addr;
1268	int slot_nr;
1269
1270	do {
1271		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1272		if (slot_nr < UINSNS_PER_PAGE) {
1273			if (!test_and_set_bit(slot_nr, area->bitmap))
1274				break;
1275
1276			slot_nr = UINSNS_PER_PAGE;
1277			continue;
1278		}
1279		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1280	} while (slot_nr >= UINSNS_PER_PAGE);
1281
1282	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1283	atomic_inc(&area->slot_count);
1284
1285	return slot_addr;
1286}
1287
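xol_take_insn_slot() claims a slot without taking a lock: find a clear bit in
the bitmap, atomically test-and-set it, retry if another thread won the race,
and sleep on area->wq when the page is full. A userspace sketch of the claim
step, using a GCC atomic builtin in place of the kernel's test_and_set_bit()
(illustrative only):

	#include <stdio.h>

	#define NSLOTS 32	/* stands in for UINSNS_PER_PAGE */

	static unsigned long bitmap;	/* bit n set => slot n is in use */

	/* claim the first free slot, or -1 when full (the kernel would sleep) */
	static int take_slot(void)
	{
		int n;

		for (n = 0; n < NSLOTS; n++) {
			unsigned long bit = 1UL << n;

			/* we own the slot iff we flipped its bit 0 -> 1 */
			if (!(__atomic_fetch_or(&bitmap, bit, __ATOMIC_SEQ_CST) & bit))
				return n;
		}
		return -1;
	}

	int main(void)
	{
		printf("first claim:  slot %d\n", take_slot());	/* 0 */
		printf("second claim: slot %d\n", take_slot());	/* 1 */
		return 0;
	}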
1288/*
 1289 * xol_get_insn_slot - allocate an instruction slot if the task was not
 1290 * already holding one.
 1291 * Returns the allocated slot address or 0.
1292 */
1293static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
1294{
1295	struct xol_area *area;
1296	unsigned long offset;
1297	void *vaddr;
1298
1299	area = get_xol_area(current->mm);
1300	if (!area) {
1301		area = xol_alloc_area();
1302		if (!area)
1303			return 0;
1304	}
1305	current->utask->xol_vaddr = xol_take_insn_slot(area);
1306
1307	/*
1308	 * Initialize the slot if xol_vaddr points to valid
1309	 * instruction slot.
1310	 */
1311	if (unlikely(!current->utask->xol_vaddr))
1312		return 0;
1313
1314	current->utask->vaddr = slot_addr;
1315	offset = current->utask->xol_vaddr & ~PAGE_MASK;
1316	vaddr = kmap_atomic(area->page);
1317	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
1318	kunmap_atomic(vaddr);
1319
1320	return current->utask->xol_vaddr;
1321}
1322
1323/*
1324 * xol_free_insn_slot - If slot was earlier allocated by
1325 * @xol_get_insn_slot(), make the slot available for
1326 * subsequent requests.
1327 */
1328static void xol_free_insn_slot(struct task_struct *tsk)
1329{
1330	struct xol_area *area;
1331	unsigned long vma_end;
1332	unsigned long slot_addr;
1333
1334	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1335		return;
1336
1337	slot_addr = tsk->utask->xol_vaddr;
1338
1339	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
1340		return;
1341
1342	area = tsk->mm->uprobes_state.xol_area;
1343	vma_end = area->vaddr + PAGE_SIZE;
1344	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1345		unsigned long offset;
1346		int slot_nr;
1347
1348		offset = slot_addr - area->vaddr;
1349		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1350		if (slot_nr >= UINSNS_PER_PAGE)
1351			return;
1352
1353		clear_bit(slot_nr, area->bitmap);
1354		atomic_dec(&area->slot_count);
1355		if (waitqueue_active(&area->wq))
1356			wake_up(&area->wq);
1357
1358		tsk->utask->xol_vaddr = 0;
1359	}
1360}
1361
1362/**
1363 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1364 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1365 * instruction.
1366 * Return the address of the breakpoint instruction.
1367 */
1368unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1369{
1370	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1371}
1372
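On x86, for example, UPROBE_SWBP_INSN is the one-byte int3 and the trap leaves
the saved instruction pointer just past it, which is why the default helper
subtracts UPROBE_SWBP_INSN_SIZE. A one-assert sketch of the arithmetic (the
x86 size and the address are assumptions):

	#include <assert.h>

	#define UPROBE_SWBP_INSN_SIZE 1	/* x86: int3 is a single byte */

	int main(void)
	{
		unsigned long ip_after_trap = 0x400341;	/* hypothetical saved ip */

		/* the breakpoint itself sits one byte earlier */
		assert(ip_after_trap - UPROBE_SWBP_INSN_SIZE == 0x400340);
		return 0;
	}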
1373/*
1374 * Called with no locks held.
 1375 * Called in context of an exiting or an exec-ing thread.
1376 */
1377void uprobe_free_utask(struct task_struct *t)
1378{
1379	struct uprobe_task *utask = t->utask;
1380
1381	if (t->uprobe_srcu_id != -1)
1382		srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
1383
1384	if (!utask)
1385		return;
1386
1387	if (utask->active_uprobe)
1388		put_uprobe(utask->active_uprobe);
1389
1390	xol_free_insn_slot(t);
1391	kfree(utask);
1392	t->utask = NULL;
1393}
1394
1395/*
1396 * Called in context of a new clone/fork from copy_process.
1397 */
1398void uprobe_copy_process(struct task_struct *t)
1399{
1400	t->utask = NULL;
1401	t->uprobe_srcu_id = -1;
1402}
1403
1404/*
1405 * Allocate a uprobe_task object for the task.
1406 * Called when the thread hits a breakpoint for the first time.
1407 *
1408 * Returns:
1409 * - pointer to new uprobe_task on success
1410 * - NULL otherwise
1411 */
1412static struct uprobe_task *add_utask(void)
1413{
1414	struct uprobe_task *utask;
1415
1416	utask = kzalloc(sizeof *utask, GFP_KERNEL);
1417	if (unlikely(!utask))
1418		return NULL;
1419
1420	utask->active_uprobe = NULL;
1421	current->utask = utask;
1422	return utask;
1423}
1424
1425/* Prepare to single-step probed instruction out of line. */
1426static int
1427pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
1428{
1429	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
1430		return 0;
1431
1432	return -EFAULT;
1433}
1434
1435/*
1436 * If we are singlestepping, then ensure this thread is not connected to
1437 * non-fatal signals until completion of singlestep.  When xol insn itself
1438 * triggers the signal,  restart the original insn even if the task is
1439 * already SIGKILL'ed (since coredump should report the correct ip).  This
 1440 * is even more important if the task has a handler for SIGSEGV/etc.: the
1441 * _same_ instruction should be repeated again after return from the signal
1442 * handler, and SSTEP can never finish in this case.
1443 */
1444bool uprobe_deny_signal(void)
1445{
1446	struct task_struct *t = current;
1447	struct uprobe_task *utask = t->utask;
1448
1449	if (likely(!utask || !utask->active_uprobe))
1450		return false;
1451
1452	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1453
1454	if (signal_pending(t)) {
1455		spin_lock_irq(&t->sighand->siglock);
1456		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1457		spin_unlock_irq(&t->sighand->siglock);
1458
1459		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1460			utask->state = UTASK_SSTEP_TRAPPED;
1461			set_tsk_thread_flag(t, TIF_UPROBE);
1462			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1463		}
1464	}
1465
1466	return true;
1467}
1468
1469/*
1470 * Avoid singlestepping the original instruction if the original instruction
1471 * is a NOP or can be emulated.
1472 */
1473static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
1474{
1475	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1476		return true;
1477
1478	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
1479	return false;
1480}
1481
1482/*
1483 * Run handler and ask thread to singlestep.
1484 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1485 */
1486static void handle_swbp(struct pt_regs *regs)
1487{
1488	struct vm_area_struct *vma;
1489	struct uprobe_task *utask;
1490	struct uprobe *uprobe;
1491	struct mm_struct *mm;
1492	unsigned long bp_vaddr;
1493
1494	uprobe = NULL;
1495	bp_vaddr = uprobe_get_swbp_addr(regs);
1496	mm = current->mm;
1497	down_read(&mm->mmap_sem);
1498	vma = find_vma(mm, bp_vaddr);
1499
1500	if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
1501		struct inode *inode;
1502		loff_t offset;
1503
1504		inode = vma->vm_file->f_mapping->host;
1505		offset = bp_vaddr - vma->vm_start;
1506		offset += (vma->vm_pgoff << PAGE_SHIFT);
1507		uprobe = find_uprobe(inode, offset);
1508	}
1509
1510	srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
1511	current->uprobe_srcu_id = -1;
1512	up_read(&mm->mmap_sem);
1513
1514	if (!uprobe) {
1515		/* No matching uprobe; signal SIGTRAP. */
1516		send_sig(SIGTRAP, current, 0);
1517		return;
1518	}
1519
1520	utask = current->utask;
1521	if (!utask) {
1522		utask = add_utask();
1523		/* Cannot allocate; re-execute the instruction. */
1524		if (!utask)
1525			goto cleanup_ret;
1526	}
1527	utask->active_uprobe = uprobe;
1528	handler_chain(uprobe, regs);
1529	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
1530		goto cleanup_ret;
1531
1532	utask->state = UTASK_SSTEP;
1533	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
1534		user_enable_single_step(current);
1535		return;
1536	}
1537
1538cleanup_ret:
1539	if (utask) {
1540		utask->active_uprobe = NULL;
1541		utask->state = UTASK_RUNNING;
1542	}
1543	if (uprobe) {
1544		if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
1545
1546			/*
1547			 * cannot singlestep; cannot skip instruction;
1548			 * re-execute the instruction.
1549			 */
1550			instruction_pointer_set(regs, bp_vaddr);
1551
1552		put_uprobe(uprobe);
1553	}
1554}
1555
1556/*
1557 * Perform required fix-ups and disable singlestep.
1558 * Allow pending signals to take effect.
1559 */
1560static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1561{
1562	struct uprobe *uprobe;
1563
1564	uprobe = utask->active_uprobe;
1565	if (utask->state == UTASK_SSTEP_ACK)
1566		arch_uprobe_post_xol(&uprobe->arch, regs);
1567	else if (utask->state == UTASK_SSTEP_TRAPPED)
1568		arch_uprobe_abort_xol(&uprobe->arch, regs);
1569	else
1570		WARN_ON_ONCE(1);
1571
1572	put_uprobe(uprobe);
1573	utask->active_uprobe = NULL;
1574	utask->state = UTASK_RUNNING;
1575	user_disable_single_step(current);
1576	xol_free_insn_slot(current);
1577
1578	spin_lock_irq(&current->sighand->siglock);
1579	recalc_sigpending(); /* see uprobe_deny_signal() */
1580	spin_unlock_irq(&current->sighand->siglock);
1581}
1582
1583/*
1584 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag.  (and on
1585 * subsequent probe hits on the thread sets the state to UTASK_BP_HIT) and
1586 * allows the thread to return from interrupt.
1587 *
1588 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
1589 * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
1590 * interrupt.
1591 *
1592 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1593 * uprobe_notify_resume().
1594 */
1595void uprobe_notify_resume(struct pt_regs *regs)
1596{
1597	struct uprobe_task *utask;
1598
1599	utask = current->utask;
1600	if (!utask || utask->state == UTASK_BP_HIT)
1601		handle_swbp(regs);
1602	else
1603		handle_singlestep(utask, regs);
1604}
1605
1606/*
1607 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 1608 * the notifier mechanism. Set TIF_UPROBE flag and indicate a breakpoint hit.
1609 */
1610int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1611{
1612	struct uprobe_task *utask;
1613
1614	if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
1615		/* task is currently not uprobed */
1616		return 0;
1617
1618	utask = current->utask;
1619	if (utask)
1620		utask->state = UTASK_BP_HIT;
1621
1622	set_thread_flag(TIF_UPROBE);
1623	current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
1624
1625	return 1;
1626}
1627
1628/*
 1629 * uprobe_post_sstep_notifier gets called in interrupt context as part of the
 1630 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
1631 */
1632int uprobe_post_sstep_notifier(struct pt_regs *regs)
1633{
1634	struct uprobe_task *utask = current->utask;
1635
1636	if (!current->mm || !utask || !utask->active_uprobe)
1637		/* task is currently not uprobed */
1638		return 0;
1639
1640	utask->state = UTASK_SSTEP_ACK;
1641	set_thread_flag(TIF_UPROBE);
1642	return 1;
1643}
1644
1645static struct notifier_block uprobe_exception_nb = {
1646	.notifier_call		= arch_uprobe_exception_notify,
1647	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
1648};
1649
1650static int __init init_uprobes(void)
1651{
1652	int i;
1653
1654	for (i = 0; i < UPROBES_HASH_SZ; i++) {
1655		mutex_init(&uprobes_mutex[i]);
1656		mutex_init(&uprobes_mmap_mutex[i]);
1657	}
1658	init_srcu_struct(&uprobes_srcu);
1659
1660	return register_die_notifier(&uprobe_exception_nb);
1661}
1662module_init(init_uprobes);
1663
1664static void __exit exit_uprobes(void)
1665{
1666}
1667module_exit(exit_uprobes);
v5.4
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * User-space Probes (UProbes)
   4 *
   5 * Copyright (C) IBM Corporation, 2008-2012
   6 * Authors:
   7 *	Srikar Dronamraju
   8 *	Jim Keniston
   9 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/highmem.h>
  14#include <linux/pagemap.h>	/* read_mapping_page */
  15#include <linux/slab.h>
  16#include <linux/sched.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/coredump.h>
  19#include <linux/export.h>
  20#include <linux/rmap.h>		/* anon_vma_prepare */
  21#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
  22#include <linux/swap.h>		/* try_to_free_swap */
  23#include <linux/ptrace.h>	/* user_enable_single_step */
  24#include <linux/kdebug.h>	/* notifier mechanism */
  25#include "../../mm/internal.h"	/* munlock_vma_page */
  26#include <linux/percpu-rwsem.h>
  27#include <linux/task_work.h>
  28#include <linux/shmem_fs.h>
  29#include <linux/khugepaged.h>
  30
  31#include <linux/uprobes.h>
  32
  33#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
  34#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
  35
  36static struct rb_root uprobes_tree = RB_ROOT;
  37/*
  38 * allows us to skip the uprobe_mmap if there are no uprobe events active
  39 * at this time.  A fine-grained per-inode count would probably be better.
  40 */
  41#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
  42
  43static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
  44
  45#define UPROBES_HASH_SZ	13
  46/* serialize uprobe->pending_list */
  47static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  48#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
  49
  50DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
  51
  52/* Have a copy of original instruction */
  53#define UPROBE_COPY_INSN	0
  54
  55struct uprobe {
  56	struct rb_node		rb_node;	/* node in the rb tree */
  57	refcount_t		ref;
  58	struct rw_semaphore	register_rwsem;
  59	struct rw_semaphore	consumer_rwsem;
  60	struct list_head	pending_list;
  61	struct uprobe_consumer	*consumers;
  62	struct inode		*inode;		/* Also hold a ref to inode */
  63	loff_t			offset;
  64	loff_t			ref_ctr_offset;
  65	unsigned long		flags;
  66
  67	/*
  68	 * The generic code assumes that it has two members of unknown type
  69	 * owned by the arch-specific code:
  70	 *
  71	 * 	insn -	copy_insn() saves the original instruction here for
  72	 *		arch_uprobe_analyze_insn().
  73	 *
  74	 *	ixol -	potentially modified instruction to execute out of
  75	 *		line, copied to xol_area by xol_get_insn_slot().
  76	 */
  77	struct arch_uprobe	arch;
  78};
  79
  80struct delayed_uprobe {
  81	struct list_head list;
  82	struct uprobe *uprobe;
  83	struct mm_struct *mm;
  84};
  85
  86static DEFINE_MUTEX(delayed_uprobe_lock);
  87static LIST_HEAD(delayed_uprobe_list);
  88
  89/*
  90 * Execute out of line area: anonymous executable mapping installed
  91 * by the probed task to execute the copy of the original instruction
  92 * mangled by set_swbp().
  93 *
  94 * On a breakpoint hit, thread contests for a slot.  It frees the
  95 * slot after singlestep. Currently a fixed number of slots are
  96 * allocated.
  97 */
  98struct xol_area {
  99	wait_queue_head_t 		wq;		/* if all slots are busy */
 100	atomic_t 			slot_count;	/* number of in-use slots */
 101	unsigned long 			*bitmap;	/* 0 = free slot */
 102
 103	struct vm_special_mapping	xol_mapping;
 104	struct page 			*pages[2];
 105	/*
 106	 * We keep the vma's vm_start rather than a pointer to the vma
 107	 * itself.  The probed process or a naughty kernel module could make
 108	 * the vma go away, and we must handle that reasonably gracefully.
 109	 */
 110	unsigned long 			vaddr;		/* Page(s) of instruction slots */
 111};
 112
 113/*
 114 * valid_vma: Verify if the specified vma is an executable vma
 115 * Relax restrictions while unregistering: vm_flags might have
 116 * changed after breakpoint was inserted.
 117 *	- is_register: indicates if we are in register context.
 118 *	- Return 1 if the specified virtual address is in an
 119 *	  executable vma.
 120 */
 121static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 122{
 123	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
 124
 125	if (is_register)
 126		flags |= VM_WRITE;
 127
 128	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
 129}
 130
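The v5.4 valid_vma() condenses the old flag tests into one mask compare: build
a reject mask (VM_HUGETLB, VM_MAYSHARE, plus VM_WRITE when registering),
require VM_MAYEXEC to be set, and require every other bit in the mask to be
clear. A standalone truth-table sketch with made-up bit values (only the
relative logic matters; the real VM_* constants differ):

	#include <assert.h>
	#include <stdbool.h>

	#define VM_WRITE	0x02UL
	#define VM_HUGETLB	0x04UL
	#define VM_MAYSHARE	0x08UL
	#define VM_MAYEXEC	0x10UL

	static bool valid(unsigned long vm_flags, bool is_register)
	{
		unsigned long flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

		if (is_register)
			flags |= VM_WRITE;

		/* VM_MAYEXEC must be set and every other masked bit clear */
		return (vm_flags & flags) == VM_MAYEXEC;
	}

	int main(void)
	{
		assert(valid(VM_MAYEXEC, true));		/* plain r-x text */
		assert(!valid(VM_MAYEXEC | VM_WRITE, true));	/* writable: no new probe */
		assert(valid(VM_MAYEXEC | VM_WRITE, false));	/* unregister is relaxed */
		assert(!valid(VM_MAYEXEC | VM_MAYSHARE, true));	/* shared file mapping */
		return 0;
	}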
 131static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
 132{
 133	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 134}
 135
 136static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
 137{
 138	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
 139}
 140
 141/**
 142 * __replace_page - replace page in vma by new page.
 143 * based on replace_page in mm/ksm.c
 144 *
 145 * @vma:      vma that holds the pte pointing to page
 146 * @addr:     address the old @page is mapped at
 147 * @old_page: the page we are replacing by new_page
 148 * @new_page: the modified page we replace page by
 149 *
 150 * If @new_page is NULL, only unmap @old_page.
 151 *
 152 * Returns 0 on success, negative error code otherwise.
 153 */
 154static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 155				struct page *old_page, struct page *new_page)
 156{
 157	struct mm_struct *mm = vma->vm_mm;
 158	struct page_vma_mapped_walk pvmw = {
 159		.page = compound_head(old_page),
 160		.vma = vma,
 161		.address = addr,
 162	};
 163	int err;
 164	struct mmu_notifier_range range;
 165	struct mem_cgroup *memcg;
 166
 167	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 168				addr + PAGE_SIZE);
 169
 170	if (new_page) {
 171		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
 172					    &memcg, false);
 173		if (err)
 174			return err;
 175	}
 176
 177	/* For try_to_free_swap() and munlock_vma_page() below */
 178	lock_page(old_page);
 179
 180	mmu_notifier_invalidate_range_start(&range);
 181	err = -EAGAIN;
 182	if (!page_vma_mapped_walk(&pvmw)) {
 183		if (new_page)
 184			mem_cgroup_cancel_charge(new_page, memcg, false);
 185		goto unlock;
 186	}
 187	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 188
 189	if (new_page) {
 190		get_page(new_page);
 191		page_add_new_anon_rmap(new_page, vma, addr, false);
 192		mem_cgroup_commit_charge(new_page, memcg, false, false);
 193		lru_cache_add_active_or_unevictable(new_page, vma);
 194	} else
 195		/* no new page, just dec_mm_counter for old_page */
 196		dec_mm_counter(mm, MM_ANONPAGES);
 197
 198	if (!PageAnon(old_page)) {
 199		dec_mm_counter(mm, mm_counter_file(old_page));
 200		inc_mm_counter(mm, MM_ANONPAGES);
 201	}
 202
 203	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
 204	ptep_clear_flush_notify(vma, addr, pvmw.pte);
 205	if (new_page)
 206		set_pte_at_notify(mm, addr, pvmw.pte,
 207				  mk_pte(new_page, vma->vm_page_prot));
 208
 209	page_remove_rmap(old_page, false);
 210	if (!page_mapped(old_page))
 211		try_to_free_swap(old_page);
 212	page_vma_mapped_walk_done(&pvmw);
 213
 214	if (vma->vm_flags & VM_LOCKED)
 215		munlock_vma_page(old_page);
 216	put_page(old_page);
 217
 218	err = 0;
 219 unlock:
 220	mmu_notifier_invalidate_range_end(&range);
 221	unlock_page(old_page);
 222	return err;
 223}
 224
 225/**
 226 * is_swbp_insn - check if instruction is breakpoint instruction.
 227 * @insn: instruction to be checked.
 228 * Default implementation of is_swbp_insn.
 229 * Returns true if @insn is a breakpoint instruction.
 230 */
 231bool __weak is_swbp_insn(uprobe_opcode_t *insn)
 232{
 233	return *insn == UPROBE_SWBP_INSN;
 234}
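
/*
 * On x86, for instance, UPROBE_SWBP_INSN is the single-byte int3
 * opcode (0xcc), so this default byte-compare suffices; an arch with
 * several breakpoint encodings can override this __weak function.
 */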
 235
 236/**
 237 * is_trap_insn - check if instruction is a trap instruction.
 238 * @insn: instruction to be checked.
 239 * Default implementation of is_trap_insn.
 240 * Returns true if @insn is a trap variant.
 241 *
 242 * This function is needed for the case where an architecture has multiple
 243 * trap instructions (like powerpc).
 244 */
 245bool __weak is_trap_insn(uprobe_opcode_t *insn)
 246{
 247	return is_swbp_insn(insn);
 248}
 249
 250static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
 251{
 252	void *kaddr = kmap_atomic(page);
 253	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
 254	kunmap_atomic(kaddr);
 255}
 256
 257static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
 258{
 259	void *kaddr = kmap_atomic(page);
 260	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
 261	kunmap_atomic(kaddr);
 262}
 263
 264static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
 265{
 266	uprobe_opcode_t old_opcode;
 267	bool is_swbp;
 268
 269	/*
 270	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
 271	 * We do not check if it is any other 'trap variant', which could
 272	 * be a conditional trap instruction such as the one powerpc supports.
 273	 *
 274	 * The logic is that we do not care if the underlying instruction
 275	 * is a trap variant; uprobes always win over any other (gdb)
 276	 * breakpoint.
 277	 */
 278	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
 279	is_swbp = is_swbp_insn(&old_opcode);
 280
 281	if (is_swbp_insn(new_opcode)) {
 282		if (is_swbp)		/* register: already installed? */
 283			return 0;
 284	} else {
 285		if (!is_swbp)		/* unregister: was it changed by us? */
 286			return 0;
 287	}
 288
 289	return 1;
 290}
 291
 292static struct delayed_uprobe *
 293delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
 294{
 295	struct delayed_uprobe *du;
 296
 297	list_for_each_entry(du, &delayed_uprobe_list, list)
 298		if (du->uprobe == uprobe && du->mm == mm)
 299			return du;
 300	return NULL;
 301}
 302
 303static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
 304{
 305	struct delayed_uprobe *du;
 306
 307	if (delayed_uprobe_check(uprobe, mm))
 308		return 0;
 309
 310	du  = kzalloc(sizeof(*du), GFP_KERNEL);
 311	if (!du)
 312		return -ENOMEM;
 313
 314	du->uprobe = uprobe;
 315	du->mm = mm;
 316	list_add(&du->list, &delayed_uprobe_list);
 317	return 0;
 318}
 319
 320static void delayed_uprobe_delete(struct delayed_uprobe *du)
 321{
 322	if (WARN_ON(!du))
 323		return;
 324	list_del(&du->list);
 325	kfree(du);
 326}
 327
 328static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
 329{
 330	struct list_head *pos, *q;
 331	struct delayed_uprobe *du;
 332
 333	if (!uprobe && !mm)
 334		return;
 335
 336	list_for_each_safe(pos, q, &delayed_uprobe_list) {
 337		du = list_entry(pos, struct delayed_uprobe, list);
 338
 339		if (uprobe && du->uprobe != uprobe)
 340			continue;
 341		if (mm && du->mm != mm)
 342			continue;
 343
 344		delayed_uprobe_delete(du);
 345	}
 346}
 347
 348static bool valid_ref_ctr_vma(struct uprobe *uprobe,
 349			      struct vm_area_struct *vma)
 350{
 351	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
 352
 353	return uprobe->ref_ctr_offset &&
 354		vma->vm_file &&
 355		file_inode(vma->vm_file) == uprobe->inode &&
 356		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
 357		vma->vm_start <= vaddr &&
 358		vma->vm_end > vaddr;
 359}
 360
 361static struct vm_area_struct *
 362find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
 363{
 364	struct vm_area_struct *tmp;
 365
 366	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
 367		if (valid_ref_ctr_vma(uprobe, tmp))
 368			return tmp;
 369
 370	return NULL;
 371}
 372
 373static int
 374__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
 375{
 376	void *kaddr;
 377	struct page *page;
 378	struct vm_area_struct *vma;
 379	int ret;
 380	short *ptr;
 381
 382	if (!vaddr || !d)
 383		return -EINVAL;
 384
 385	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
 386			FOLL_WRITE, &page, &vma, NULL);
 387	if (unlikely(ret <= 0)) {
 388		/*
 389		 * We are asking for 1 page. If get_user_pages_remote() fails,
 390		 * it may return 0; in that case we have to return an error.
 391		 */
 392		return ret == 0 ? -EBUSY : ret;
 393	}
 394
 395	kaddr = kmap_atomic(page);
 396	ptr = kaddr + (vaddr & ~PAGE_MASK);
 397
 398	if (unlikely(*ptr + d < 0)) {
 399		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
 400			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
 401		ret = -EINVAL;
 402		goto out;
 403	}
 404
 405	*ptr += d;
 406	ret = 0;
 407out:
 408	kunmap_atomic(kaddr);
 409	put_page(page);
 410	return ret;
 411}
 412
 413static void update_ref_ctr_warn(struct uprobe *uprobe,
 414				struct mm_struct *mm, short d)
 415{
 416	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
 417		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
 418		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
 419		(unsigned long long) uprobe->offset,
 420		(unsigned long long) uprobe->ref_ctr_offset, mm);
 421}
 422
 423static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
 424			  short d)
 425{
 426	struct vm_area_struct *rc_vma;
 427	unsigned long rc_vaddr;
 428	int ret = 0;
 429
 430	rc_vma = find_ref_ctr_vma(uprobe, mm);
 431
 432	if (rc_vma) {
 433		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
 434		ret = __update_ref_ctr(mm, rc_vaddr, d);
 435		if (ret)
 436			update_ref_ctr_warn(uprobe, mm, d);
 437
 438		if (d > 0)
 439			return ret;
 440	}
 441
 442	mutex_lock(&delayed_uprobe_lock);
 443	if (d > 0)
 444		ret = delayed_uprobe_add(uprobe, mm);
 445	else
 446		delayed_uprobe_remove(uprobe, mm);
 447	mutex_unlock(&delayed_uprobe_lock);
 448
 449	return ret;
 450}
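
/*
 * The reference counter acts as an SDT-style semaphore tested by
 * userspace; e.g. (illustrative only) a probe point guarded as
 *
 *	if (my_probe_semaphore)
 *		collect_and_emit_probe_args();
 *
 * pays the argument-setup cost only while the counter is non-zero,
 * i.e. while at least one consumer has the breakpoint armed.
 */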
 451
 452/*
 453 * NOTE:
 454 * Expect the breakpoint instruction to be the smallest-size instruction for
 455 * the architecture. If an arch has variable-length instructions and the
 456 * breakpoint instruction is not the smallest-length instruction
 457 * supported by that architecture, then we need to modify is_trap_at_addr and
 458 * uprobe_write_opcode accordingly. This is never a problem for archs
 459 * that have fixed-length instructions.
 460 *
 461 * uprobe_write_opcode - write the opcode at a given virtual address.
 462 * @mm: the probed process address space.
 463 * @vaddr: the virtual address to store the opcode.
 464 * @opcode: opcode to be written at @vaddr.
 465 *
 466 * Called with mm->mmap_sem held for write.
 467 * Return 0 (success) or a negative errno.
 468 */
 469int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 470			unsigned long vaddr, uprobe_opcode_t opcode)
 471{
 472	struct uprobe *uprobe;
 473	struct page *old_page, *new_page;
 474	struct vm_area_struct *vma;
 475	int ret, is_register, ref_ctr_updated = 0;
 476	bool orig_page_huge = false;
 477	unsigned int gup_flags = FOLL_FORCE;
 478
 479	is_register = is_swbp_insn(&opcode);
 480	uprobe = container_of(auprobe, struct uprobe, arch);
 481
 482retry:
 483	if (is_register)
 484		gup_flags |= FOLL_SPLIT_PMD;
 485	/* Read the page with vaddr into memory */
 486	ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
 487				    &old_page, &vma, NULL);
 488	if (ret <= 0)
 489		return ret;
 490
 491	ret = verify_opcode(old_page, vaddr, &opcode);
 492	if (ret <= 0)
 493		goto put_old;
 494
 495	if (WARN(!is_register && PageCompound(old_page),
 496		 "uprobe unregister should never work on compound page\n")) {
 497		ret = -EINVAL;
 498		goto put_old;
 499	}
 500
 501	/* We are going to replace instruction, update ref_ctr. */
 502	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
 503		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
 504		if (ret)
 505			goto put_old;
 506
 507		ref_ctr_updated = 1;
 508	}
 509
 510	ret = 0;
 511	if (!is_register && !PageAnon(old_page))
 512		goto put_old;
 513
 514	ret = anon_vma_prepare(vma);
 515	if (ret)
 516		goto put_old;
 517
 518	ret = -ENOMEM;
 519	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 520	if (!new_page)
 521		goto put_old;
 522
 523	__SetPageUptodate(new_page);
 524	copy_highpage(new_page, old_page);
 525	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 526
 527	if (!is_register) {
 528		struct page *orig_page;
 529		pgoff_t index;
 530
 531		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
 532
 533		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
 534		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
 535					  index);
 536
 537		if (orig_page) {
 538			if (PageUptodate(orig_page) &&
 539			    pages_identical(new_page, orig_page)) {
 540				/* let go of new_page */
 541				put_page(new_page);
 542				new_page = NULL;
 543
 544				if (PageCompound(orig_page))
 545					orig_page_huge = true;
 546			}
 547			put_page(orig_page);
 548		}
 549	}
 550
 551	ret = __replace_page(vma, vaddr, old_page, new_page);
 552	if (new_page)
 553		put_page(new_page);
 554put_old:
 555	put_page(old_page);
 556
 557	if (unlikely(ret == -EAGAIN))
 558		goto retry;
 559
 560	/* Revert the reference counter if the instruction update failed. */
 561	if (ret && is_register && ref_ctr_updated)
 562		update_ref_ctr(uprobe, mm, -1);
 563
 564	/* try collapse pmd for compound page */
 565	if (!ret && orig_page_huge)
 566		collapse_pte_mapped_thp(mm, vaddr);
 567
 568	return ret;
 569}
 570
 571/**
 572 * set_swbp - store breakpoint at a given address.
 573 * @auprobe: arch specific probepoint information.
 574 * @mm: the probed process address space.
 575 * @vaddr: the virtual address to insert the opcode.
 576 *
 577 * For mm @mm, store the breakpoint instruction at @vaddr.
 578 * Return 0 (success) or a negative errno.
 579 */
 580int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 581{
 582	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
 583}
 584
 585/**
 586 * set_orig_insn - Restore the original instruction.
 587 * @auprobe: arch specific probepoint information.
 588 * @mm: the probed process address space.
 589 * @vaddr: the virtual address to insert the opcode.
 590 *
 591 * For mm @mm, restore the original instruction at @vaddr.
 592 * Return 0 (success) or a negative errno.
 593 */
 594int __weak
 595set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 596{
 597	return uprobe_write_opcode(auprobe, mm, vaddr,
 598			*(uprobe_opcode_t *)&auprobe->insn);
 599}
 600
 601static struct uprobe *get_uprobe(struct uprobe *uprobe)
 602{
 603	refcount_inc(&uprobe->ref);
 604	return uprobe;
 605}
 606
 607static void put_uprobe(struct uprobe *uprobe)
 608{
 609	if (refcount_dec_and_test(&uprobe->ref)) {
 610		/*
 611		 * If application munmap(exec_vma) before uprobe_unregister()
 612		 * gets called, we don't get a chance to remove uprobe from
 613		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
 614		 */
 615		mutex_lock(&delayed_uprobe_lock);
 616		delayed_uprobe_remove(uprobe, NULL);
 617		mutex_unlock(&delayed_uprobe_lock);
 618		kfree(uprobe);
 619	}
 620}
 621
 622static int match_uprobe(struct uprobe *l, struct uprobe *r)
 623{
 624	if (l->inode < r->inode)
 625		return -1;
 626
 627	if (l->inode > r->inode)
 628		return 1;
 629
 630	if (l->offset < r->offset)
 631		return -1;
 632
 633	if (l->offset > r->offset)
 634		return 1;
 635
 636	return 0;
 637}
 638
 639static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 640{
 641	struct uprobe u = { .inode = inode, .offset = offset };
 642	struct rb_node *n = uprobes_tree.rb_node;
 643	struct uprobe *uprobe;
 644	int match;
 645
 646	while (n) {
 647		uprobe = rb_entry(n, struct uprobe, rb_node);
 648		match = match_uprobe(&u, uprobe);
 649		if (!match)
 650			return get_uprobe(uprobe);
 651
 652		if (match < 0)
 653			n = n->rb_left;
 654		else
 655			n = n->rb_right;
 656	}
 657	return NULL;
 658}
 659
 660/*
 661 * Find a uprobe corresponding to a given inode:offset
 662 * Acquires uprobes_treelock
 663 */
 664static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 665{
 666	struct uprobe *uprobe;
 667
 668	spin_lock(&uprobes_treelock);
 669	uprobe = __find_uprobe(inode, offset);
 670	spin_unlock(&uprobes_treelock);
 671
 672	return uprobe;
 673}
 674
 675static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
 676{
 677	struct rb_node **p = &uprobes_tree.rb_node;
 678	struct rb_node *parent = NULL;
 679	struct uprobe *u;
 680	int match;
 681
 682	while (*p) {
 683		parent = *p;
 684		u = rb_entry(parent, struct uprobe, rb_node);
 685		match = match_uprobe(uprobe, u);
 686		if (!match)
 687			return get_uprobe(u);
 688
 689		if (match < 0)
 690			p = &parent->rb_left;
 691		else
 692			p = &parent->rb_right;
 693
 694	}
 695
 696	u = NULL;
 697	rb_link_node(&uprobe->rb_node, parent, p);
 698	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
 699	/* get access + creation ref */
 700	refcount_set(&uprobe->ref, 2);
 701
 702	return u;
 703}
 704
 705/*
 706 * Acquire uprobes_treelock.
 707 * Matching uprobe already exists in rbtree;
 708 *	increment (access refcount) and return the matching uprobe.
 709 *
 710 * No matching uprobe; insert the uprobe in rb_tree;
 711 *	get a double refcount (access + creation) and return NULL.
 712 */
 713static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 714{
 715	struct uprobe *u;
 716
 717	spin_lock(&uprobes_treelock);
 718	u = __insert_uprobe(uprobe);
 719	spin_unlock(&uprobes_treelock);
 720
 721	return u;
 722}
 723
 724static void
 725ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
 726{
 727	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
 728		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
 729		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
 730		(unsigned long long) cur_uprobe->ref_ctr_offset,
 731		(unsigned long long) uprobe->ref_ctr_offset);
 732}
 733
 734static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
 735				   loff_t ref_ctr_offset)
 736{
 737	struct uprobe *uprobe, *cur_uprobe;
 738
 739	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
 740	if (!uprobe)
 741		return NULL;
 742
 743	uprobe->inode = inode;
 744	uprobe->offset = offset;
 745	uprobe->ref_ctr_offset = ref_ctr_offset;
 746	init_rwsem(&uprobe->register_rwsem);
 747	init_rwsem(&uprobe->consumer_rwsem);
 748
 749	/* add to uprobes_tree, sorted on inode:offset */
 750	cur_uprobe = insert_uprobe(uprobe);
 751	/* a uprobe exists for this inode:offset combination */
 752	if (cur_uprobe) {
 753		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
 754			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
 755			put_uprobe(cur_uprobe);
 756			kfree(uprobe);
 757			return ERR_PTR(-EINVAL);
 758		}
 759		kfree(uprobe);
 760		uprobe = cur_uprobe;
 761	}
 762
 763	return uprobe;
 764}
 765
 766static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 767{
 768	down_write(&uprobe->consumer_rwsem);
 769	uc->next = uprobe->consumers;
 770	uprobe->consumers = uc;
 771	up_write(&uprobe->consumer_rwsem);
 772}
 773
 774/*
 775 * For uprobe @uprobe, delete the consumer @uc.
 776 * Return true if the @uc is deleted successfully
 777 * or return false.
 778 */
 779static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 780{
 781	struct uprobe_consumer **con;
 782	bool ret = false;
 783
 784	down_write(&uprobe->consumer_rwsem);
 785	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
 786		if (*con == uc) {
 787			*con = uc->next;
 788			ret = true;
 789			break;
 790		}
 791	}
 792	up_write(&uprobe->consumer_rwsem);
 793
 794	return ret;
 795}
 796
 797static int __copy_insn(struct address_space *mapping, struct file *filp,
 798			void *insn, int nbytes, loff_t offset)
 799{
 800	struct page *page;
 801	/*
 802	 * Ensure that the page that has the original instruction is populated
 803	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
 804	 * see uprobe_register().
 805	 */
 806	if (mapping->a_ops->readpage)
 807		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 808	else
 809		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 810	if (IS_ERR(page))
 811		return PTR_ERR(page);
 812
 813	copy_from_page(page, offset, insn, nbytes);
 814	put_page(page);
 815
 816	return 0;
 817}
 818
 819static int copy_insn(struct uprobe *uprobe, struct file *filp)
 820{
 821	struct address_space *mapping = uprobe->inode->i_mapping;
 822	loff_t offs = uprobe->offset;
 823	void *insn = &uprobe->arch.insn;
 824	int size = sizeof(uprobe->arch.insn);
 825	int len, err = -EIO;
 826
 827	/* Copy only available bytes, -EIO if nothing was read */
 828	do {
 829		if (offs >= i_size_read(uprobe->inode))
 830			break;
 831
 832		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
 833		err = __copy_insn(mapping, filp, insn, len, offs);
 834		if (err)
 835			break;
 836
 837		insn += len;
 838		offs += len;
 839		size -= len;
 840	} while (size);
 841
 842	return err;
 843}
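
/*
 * Worked example (hypothetical numbers): with PAGE_SIZE == 4096, an
 * 8-byte arch.insn buffer and uprobe->offset == 4094, the first
 * iteration copies the 2 bytes up to the page boundary and the second
 * copies the remaining 6 bytes from the next page.
 */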
 844
 845static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 846				struct mm_struct *mm, unsigned long vaddr)
 847{
 848	int ret = 0;
 849
 850	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
 851		return ret;
 852
 853	/* TODO: move this into _register, until then we abuse this sem. */
 854	down_write(&uprobe->consumer_rwsem);
 855	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
 856		goto out;
 857
 858	ret = copy_insn(uprobe, file);
 859	if (ret)
 860		goto out;
 861
 862	ret = -ENOTSUPP;
 863	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
 864		goto out;
 865
 866	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
 867	if (ret)
 868		goto out;
 869
 870	/* uprobe_write_opcode() assumes we don't cross page boundary */
 871	BUG_ON((uprobe->offset & ~PAGE_MASK) +
 872			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
 873
 874	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
 875	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 876
 877 out:
 878	up_write(&uprobe->consumer_rwsem);
 879
 880	return ret;
 881}
 882
 883static inline bool consumer_filter(struct uprobe_consumer *uc,
 884				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 885{
 886	return !uc->filter || uc->filter(uc, ctx, mm);
 887}
 888
 889static bool filter_chain(struct uprobe *uprobe,
 890			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 891{
 892	struct uprobe_consumer *uc;
 893	bool ret = false;
 894
 895	down_read(&uprobe->consumer_rwsem);
 896	for (uc = uprobe->consumers; uc; uc = uc->next) {
 897		ret = consumer_filter(uc, ctx, mm);
 898		if (ret)
 899			break;
 900	}
 901	up_read(&uprobe->consumer_rwsem);
 902
 903	return ret;
 904}
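
/*
 * filter_chain() is thus an OR over the consumers: it returns true as
 * soon as one consumer's ->filter accepts @mm (a consumer without a
 * ->filter accepts everything).  For example, a consumer tracing a
 * single process can supply a ->filter matching only that mm, keeping
 * the breakpoint out of every other mapping of the same file.
 */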
 905
 906static int
 907install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 908			struct vm_area_struct *vma, unsigned long vaddr)
 909{
 910	bool first_uprobe;
 911	int ret;
 912
 913	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
 914	if (ret)
 915		return ret;
 916
 917	/*
 918	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
 919	 * the task can hit this breakpoint right after __replace_page().
 920	 */
 921	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
 922	if (first_uprobe)
 923		set_bit(MMF_HAS_UPROBES, &mm->flags);
 924
 925	ret = set_swbp(&uprobe->arch, mm, vaddr);
 926	if (!ret)
 927		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
 928	else if (first_uprobe)
 929		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 930
 931	return ret;
 932}
 933
 934static int
 935remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 936{
 937	set_bit(MMF_RECALC_UPROBES, &mm->flags);
 938	return set_orig_insn(&uprobe->arch, mm, vaddr);
 939}
 940
 941static inline bool uprobe_is_active(struct uprobe *uprobe)
 942{
 943	return !RB_EMPTY_NODE(&uprobe->rb_node);
 944}
 945/*
 946 * There could be threads that have already hit the breakpoint. They
 947 * will recheck the current insn and restart if find_uprobe() fails.
 948 * See find_active_uprobe().
 949 */
 950static void delete_uprobe(struct uprobe *uprobe)
 951{
 952	if (WARN_ON(!uprobe_is_active(uprobe)))
 953		return;
 954
 955	spin_lock(&uprobes_treelock);
 956	rb_erase(&uprobe->rb_node, &uprobes_tree);
 957	spin_unlock(&uprobes_treelock);
 958	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
 959	put_uprobe(uprobe);
 960}
 961
 962struct map_info {
 963	struct map_info *next;
 964	struct mm_struct *mm;
 965	unsigned long vaddr;
 966};
 967
 968static inline struct map_info *free_map_info(struct map_info *info)
 969{
 970	struct map_info *next = info->next;
 971	kfree(info);
 972	return next;
 973}
 974
 975static struct map_info *
 976build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 977{
 978	unsigned long pgoff = offset >> PAGE_SHIFT;
 979	struct vm_area_struct *vma;
 980	struct map_info *curr = NULL;
 981	struct map_info *prev = NULL;
 982	struct map_info *info;
 983	int more = 0;
 984
 985 again:
 986	i_mmap_lock_read(mapping);
 987	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 988		if (!valid_vma(vma, is_register))
 989			continue;
 990
 991		if (!prev && !more) {
 992			/*
 993			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
 994			 * reclaim. This is optimistic; no harm done if it fails.
 995			 */
 996			prev = kmalloc(sizeof(struct map_info),
 997					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
 998			if (prev)
 999				prev->next = NULL;
1000		}
1001		if (!prev) {
1002			more++;
1003			continue;
1004		}
1005
1006		if (!mmget_not_zero(vma->vm_mm))
1007			continue;
1008
1009		info = prev;
1010		prev = prev->next;
1011		info->next = curr;
1012		curr = info;
1013
1014		info->mm = vma->vm_mm;
1015		info->vaddr = offset_to_vaddr(vma, offset);
1016	}
1017	i_mmap_unlock_read(mapping);
1018
1019	if (!more)
1020		goto out;
1021
1022	prev = curr;
1023	while (curr) {
1024		mmput(curr->mm);
1025		curr = curr->next;
1026	}
1027
1028	do {
1029		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
1030		if (!info) {
1031			curr = ERR_PTR(-ENOMEM);
1032			goto out;
1033		}
1034		info->next = prev;
1035		prev = info;
1036	} while (--more);
1037
1038	goto again;
1039 out:
1040	while (prev)
1041		prev = free_map_info(prev);
1042	return curr;
1043}
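
/*
 * The two-pass pattern above: only GFP_NOWAIT allocations are safe
 * under i_mmap_rwsem, so any shortfall is counted in 'more'; the lock
 * is then dropped, the mm references taken so far are released, the
 * missing nodes are allocated with GFP_KERNEL, and the walk restarts
 * from 'again:'.
 */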
1044
1045static int
1046register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
1047{
1048	bool is_register = !!new;
1049	struct map_info *info;
1050	int err = 0;
1051
1052	percpu_down_write(&dup_mmap_sem);
1053	info = build_map_info(uprobe->inode->i_mapping,
1054					uprobe->offset, is_register);
1055	if (IS_ERR(info)) {
1056		err = PTR_ERR(info);
1057		goto out;
1058	}
1059
1060	while (info) {
1061		struct mm_struct *mm = info->mm;
1062		struct vm_area_struct *vma;
1063
1064		if (err && is_register)
1065			goto free;
1066
1067		down_write(&mm->mmap_sem);
1068		vma = find_vma(mm, info->vaddr);
1069		if (!vma || !valid_vma(vma, is_register) ||
1070		    file_inode(vma->vm_file) != uprobe->inode)
1071			goto unlock;
1072
1073		if (vma->vm_start > info->vaddr ||
1074		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
1075			goto unlock;
1076
1077		if (is_register) {
1078			/* consult only the "caller", the new consumer. */
1079			if (consumer_filter(new,
1080					UPROBE_FILTER_REGISTER, mm))
1081				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
1082		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
1083			if (!filter_chain(uprobe,
1084					UPROBE_FILTER_UNREGISTER, mm))
1085				err |= remove_breakpoint(uprobe, mm, info->vaddr);
1086		}
1087
1088 unlock:
1089		up_write(&mm->mmap_sem);
1090 free:
1091		mmput(mm);
1092		info = free_map_info(info);
1093	}
1094 out:
1095	percpu_up_write(&dup_mmap_sem);
1096	return err;
1097}
1098
1099static void
1100__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
1101{
1102	int err;
1103
1104	if (WARN_ON(!consumer_del(uprobe, uc)))
1105		return;
1106
1107	err = register_for_each_vma(uprobe, NULL);
1108	/* TODO: can't unregister? schedule a worker thread */
1109	if (!uprobe->consumers && !err)
1110		delete_uprobe(uprobe);
1111}
1112
1113/*
1114 * uprobe_unregister - unregister an already registered probe.
1115 * @inode: the file in which the probe has to be removed.
1116 * @offset: offset from the start of the file.
1117 * @uc: identify which probe if multiple probes are colocated.
1118 */
1119void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
1120{
1121	struct uprobe *uprobe;
1122
1123	uprobe = find_uprobe(inode, offset);
1124	if (WARN_ON(!uprobe))
1125		return;
1126
1127	down_write(&uprobe->register_rwsem);
1128	__uprobe_unregister(uprobe, uc);
1129	up_write(&uprobe->register_rwsem);
1130	put_uprobe(uprobe);
1131}
1132EXPORT_SYMBOL_GPL(uprobe_unregister);
1133
1134/*
1135 * __uprobe_register - register a probe
1136 * @inode: the file in which the probe has to be placed.
1137 * @offset: offset from the start of the file.
1138 * @uc: information on how to handle the probe.
1139 *
1140 * Apart from the access refcount, __uprobe_register() takes a creation
1141 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
1142 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
1143 * tuple).  Creation refcount stops uprobe_unregister from freeing the
1144 * @uprobe even before the register operation is complete. Creation
1145 * refcount is released when the last @uc for the @uprobe
1146 * unregisters. Caller of __uprobe_register() is required to keep @inode
1147 * (and the containing mount) referenced.
1148 *
1149 * Return errno if it cannot successfully install probes
1150 * else return 0 (success)
1151 */
1152static int __uprobe_register(struct inode *inode, loff_t offset,
1153			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1154{
1155	struct uprobe *uprobe;
1156	int ret;
1157
1158	/* Uprobe must have at least one set consumer */
1159	if (!uc->handler && !uc->ret_handler)
1160		return -EINVAL;
1161
1162	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
1163	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
1164		return -EIO;
1165	/* Racy, just to catch the obvious mistakes */
1166	if (offset > i_size_read(inode))
1167		return -EINVAL;
1168
1169 retry:
1170	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
1171	if (!uprobe)
1172		return -ENOMEM;
1173	if (IS_ERR(uprobe))
1174		return PTR_ERR(uprobe);
1175
1176	/*
1177	 * We can race with uprobe_unregister()->delete_uprobe().
1178	 * Check uprobe_is_active() and retry if it is false.
1179	 */
1180	down_write(&uprobe->register_rwsem);
1181	ret = -EAGAIN;
1182	if (likely(uprobe_is_active(uprobe))) {
1183		consumer_add(uprobe, uc);
1184		ret = register_for_each_vma(uprobe, uc);
1185		if (ret)
1186			__uprobe_unregister(uprobe, uc);
1187	}
1188	up_write(&uprobe->register_rwsem);
1189	put_uprobe(uprobe);
1190
1191	if (unlikely(ret == -EAGAIN))
1192		goto retry;
1193	return ret;
1194}
1195
1196int uprobe_register(struct inode *inode, loff_t offset,
1197		    struct uprobe_consumer *uc)
1198{
1199	return __uprobe_register(inode, offset, 0, uc);
1200}
1201EXPORT_SYMBOL_GPL(uprobe_register);
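
/*
 * Minimal usage sketch (illustrative only; names are hypothetical and
 * error handling is elided):
 *
 *	static int my_handler(struct uprobe_consumer *uc,
 *			      struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 */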
1202
1203int uprobe_register_refctr(struct inode *inode, loff_t offset,
1204			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
1205{
1206	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
1207}
1208EXPORT_SYMBOL_GPL(uprobe_register_refctr);
1209
1210/*
1211 * uprobe_apply - add or remove the breakpoints of an already registered probe.
1212 * @inode: the file in which the probe resides.
1213 * @offset: offset from the start of the file.
1214 * @uc: consumer which wants to add more or remove some breakpoints
1215 * @add: add or remove the breakpoints
1216 */
1217int uprobe_apply(struct inode *inode, loff_t offset,
1218			struct uprobe_consumer *uc, bool add)
1219{
1220	struct uprobe *uprobe;
1221	struct uprobe_consumer *con;
1222	int ret = -ENOENT;
1223
1224	uprobe = find_uprobe(inode, offset);
1225	if (WARN_ON(!uprobe))
1226		return ret;
1227
1228	down_write(&uprobe->register_rwsem);
1229	for (con = uprobe->consumers; con && con != uc ; con = con->next)
1230		;
1231	if (con)
1232		ret = register_for_each_vma(uprobe, add ? uc : NULL);
1233	up_write(&uprobe->register_rwsem);
1234	put_uprobe(uprobe);
1235
1236	return ret;
1237}
1238
1239static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
1240{
1241	struct vm_area_struct *vma;
1242	int err = 0;
1243
1244	down_read(&mm->mmap_sem);
1245	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1246		unsigned long vaddr;
1247		loff_t offset;
1248
1249		if (!valid_vma(vma, false) ||
1250		    file_inode(vma->vm_file) != uprobe->inode)
1251			continue;
1252
1253		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1254		if (uprobe->offset <  offset ||
1255		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
1256			continue;
1257
1258		vaddr = offset_to_vaddr(vma, uprobe->offset);
1259		err |= remove_breakpoint(uprobe, mm, vaddr);
1260	}
1261	up_read(&mm->mmap_sem);
1262
1263	return err;
1264}
1265
1266static struct rb_node *
1267find_node_in_range(struct inode *inode, loff_t min, loff_t max)
1268{
1269	struct rb_node *n = uprobes_tree.rb_node;
1270
1271	while (n) {
1272		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1273
1274		if (inode < u->inode) {
1275			n = n->rb_left;
1276		} else if (inode > u->inode) {
1277			n = n->rb_right;
1278		} else {
1279			if (max < u->offset)
1280				n = n->rb_left;
1281			else if (min > u->offset)
1282				n = n->rb_right;
1283			else
1284				break;
1285		}
1286	}
1287
1288	return n;
1289}
1290
1291/*
1292 * For a given range in vma, build a list of probes that need to be inserted.
1293 */
1294static void build_probe_list(struct inode *inode,
1295				struct vm_area_struct *vma,
1296				unsigned long start, unsigned long end,
1297				struct list_head *head)
1298{
1299	loff_t min, max;
1300	struct rb_node *n, *t;
1301	struct uprobe *u;
1302
1303	INIT_LIST_HEAD(head);
1304	min = vaddr_to_offset(vma, start);
1305	max = min + (end - start) - 1;
1306
1307	spin_lock(&uprobes_treelock);
1308	n = find_node_in_range(inode, min, max);
1309	if (n) {
1310		for (t = n; t; t = rb_prev(t)) {
1311			u = rb_entry(t, struct uprobe, rb_node);
1312			if (u->inode != inode || u->offset < min)
1313				break;
1314			list_add(&u->pending_list, head);
1315			get_uprobe(u);
1316		}
1317		for (t = n; (t = rb_next(t)); ) {
1318			u = rb_entry(t, struct uprobe, rb_node);
1319			if (u->inode != inode || u->offset > max)
1320				break;
1321			list_add(&u->pending_list, head);
1322			get_uprobe(u);
1323		}
1324	}
1325	spin_unlock(&uprobes_treelock);
1326}
1327
1328/* @vma contains reference counter, not the probed instruction. */
1329static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
1330{
1331	struct list_head *pos, *q;
1332	struct delayed_uprobe *du;
1333	unsigned long vaddr;
1334	int ret = 0, err = 0;
1335
1336	mutex_lock(&delayed_uprobe_lock);
1337	list_for_each_safe(pos, q, &delayed_uprobe_list) {
1338		du = list_entry(pos, struct delayed_uprobe, list);
1339
1340		if (du->mm != vma->vm_mm ||
1341		    !valid_ref_ctr_vma(du->uprobe, vma))
1342			continue;
1343
1344		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
1345		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
1346		if (ret) {
1347			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
1348			if (!err)
1349				err = ret;
1350		}
1351		delayed_uprobe_delete(du);
1352	}
1353	mutex_unlock(&delayed_uprobe_lock);
1354	return err;
1355}
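
/*
 * Scenario (illustrative): if a probe is registered before the library
 * is mapped, update_ref_ctr() finds no counter vma and queues a
 * delayed_uprobe instead.  Once the data segment is mmap()ed writable,
 * uprobe_mmap() below calls delayed_ref_ctr_inc() and the pending
 * increments are applied.
 */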
1356
1357/*
1358 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1359 *
1360 * Currently we ignore all errors and always return 0, the callers
1361 * can't handle the failure anyway.
1362 */
1363int uprobe_mmap(struct vm_area_struct *vma)
1364{
1365	struct list_head tmp_list;
1366	struct uprobe *uprobe, *u;
1367	struct inode *inode;
1368
1369	if (no_uprobe_events())
1370		return 0;
1371
1372	if (vma->vm_file &&
1373	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
1374	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
1375		delayed_ref_ctr_inc(vma);
1376
1377	if (!valid_vma(vma, true))
1378		return 0;
1379
1380	inode = file_inode(vma->vm_file);
1381	if (!inode)
1382		return 0;
1383
1384	mutex_lock(uprobes_mmap_hash(inode));
1385	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1386	/*
1387	 * We can race with uprobe_unregister(); this uprobe can already be
1388	 * removed. But in this case filter_chain() must return false, as all
1389	 * consumers have gone away.
1390	 */
1391	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1392		if (!fatal_signal_pending(current) &&
1393		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1394			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1395			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1396		}
1397		put_uprobe(uprobe);
1398	}
1399	mutex_unlock(uprobes_mmap_hash(inode));
1400
1401	return 0;
1402}
1403
1404static bool
1405vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1406{
1407	loff_t min, max;
1408	struct inode *inode;
1409	struct rb_node *n;
1410
1411	inode = file_inode(vma->vm_file);
1412
1413	min = vaddr_to_offset(vma, start);
1414	max = min + (end - start) - 1;
1415
1416	spin_lock(&uprobes_treelock);
1417	n = find_node_in_range(inode, min, max);
1418	spin_unlock(&uprobes_treelock);
1419
1420	return !!n;
1421}
1422
1423/*
1424 * Called in context of a munmap of a vma.
1425 */
1426void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1427{
1428	if (no_uprobe_events() || !valid_vma(vma, false))
1429		return;
1430
1431	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1432		return;
1433
1434	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1435	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1436		return;
1437
1438	if (vma_has_uprobes(vma, start, end))
1439		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1440}
1441
1442/* Slot allocation for XOL */
1443static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1444{
1445	struct vm_area_struct *vma;
1446	int ret;
1447
1448	if (down_write_killable(&mm->mmap_sem))
1449		return -EINTR;
1450
1451	if (mm->uprobes_state.xol_area) {
1452		ret = -EALREADY;
1453		goto fail;
1454	}
1455
1456	if (!area->vaddr) {
1457		/* Try to map as high as possible, this is only a hint. */
1458		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1459						PAGE_SIZE, 0, 0);
1460		if (area->vaddr & ~PAGE_MASK) {
1461			ret = area->vaddr;
1462			goto fail;
1463		}
1464	}
1465
1466	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1467				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1468				&area->xol_mapping);
1469	if (IS_ERR(vma)) {
1470		ret = PTR_ERR(vma);
1471		goto fail;
1472	}
1473
1474	ret = 0;
1475	/* pairs with get_xol_area() */
1476	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
1477 fail:
1478	up_write(&mm->mmap_sem);
1479
1480	return ret;
1481}
1482
1483static struct xol_area *__create_xol_area(unsigned long vaddr)
1484{
1485	struct mm_struct *mm = current->mm;
1486	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1487	struct xol_area *area;
1488
1489	area = kmalloc(sizeof(*area), GFP_KERNEL);
1490	if (unlikely(!area))
1491		goto out;
1492
1493	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
1494			       GFP_KERNEL);
1495	if (!area->bitmap)
1496		goto free_area;
1497
1498	area->xol_mapping.name = "[uprobes]";
1499	area->xol_mapping.fault = NULL;
1500	area->xol_mapping.pages = area->pages;
1501	area->pages[0] = alloc_page(GFP_HIGHUSER);
1502	if (!area->pages[0])
1503		goto free_bitmap;
1504	area->pages[1] = NULL;
1505
1506	area->vaddr = vaddr;
1507	init_waitqueue_head(&area->wq);
1508	/* Reserve the 1st slot for get_trampoline_vaddr() */
1509	set_bit(0, area->bitmap);
1510	atomic_set(&area->slot_count, 1);
1511	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1512
1513	if (!xol_add_vma(mm, area))
1514		return area;
1515
1516	__free_page(area->pages[0]);
1517 free_bitmap:
1518	kfree(area->bitmap);
1519 free_area:
1520	kfree(area);
1521 out:
1522	return NULL;
1523}
1524
1525/*
1526 * get_xol_area - Allocate process's xol_area if necessary.
1527 * This area will be used for storing instructions for execution out of line.
1528 *
1529 * Returns the allocated area or NULL.
1530 */
1531static struct xol_area *get_xol_area(void)
1532{
1533	struct mm_struct *mm = current->mm;
1534	struct xol_area *area;
1535
1536	if (!mm->uprobes_state.xol_area)
1537		__create_xol_area(0);
1538
1539	/* Pairs with xol_add_vma() smp_store_release() */
1540	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
1541	return area;
1542}
1543
1544/*
1545 * uprobe_clear_state - Free the area allocated for slots.
1546 */
1547void uprobe_clear_state(struct mm_struct *mm)
1548{
1549	struct xol_area *area = mm->uprobes_state.xol_area;
1550
1551	mutex_lock(&delayed_uprobe_lock);
1552	delayed_uprobe_remove(NULL, mm);
1553	mutex_unlock(&delayed_uprobe_lock);
1554
1555	if (!area)
1556		return;
1557
1558	put_page(area->pages[0]);
1559	kfree(area->bitmap);
1560	kfree(area);
1561}
1562
1563void uprobe_start_dup_mmap(void)
1564{
1565	percpu_down_read(&dup_mmap_sem);
1566}
1567
1568void uprobe_end_dup_mmap(void)
1569{
1570	percpu_up_read(&dup_mmap_sem);
1571}
1572
1573void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1574{
1575	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1576		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1577		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1578		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1579	}
1580}
1581
1582/*
1583 *  - search for a free slot.
1584 */
1585static unsigned long xol_take_insn_slot(struct xol_area *area)
1586{
1587	unsigned long slot_addr;
1588	int slot_nr;
1589
1590	do {
1591		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1592		if (slot_nr < UINSNS_PER_PAGE) {
1593			if (!test_and_set_bit(slot_nr, area->bitmap))
1594				break;
1595
1596			slot_nr = UINSNS_PER_PAGE;
1597			continue;
1598		}
1599		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1600	} while (slot_nr >= UINSNS_PER_PAGE);
1601
1602	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1603	atomic_inc(&area->slot_count);
1604
1605	return slot_addr;
1606}
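
/*
 * Example (assuming 4K pages and a hypothetical 128-byte slot size):
 * UINSNS_PER_PAGE is 32 and slot_nr 3 maps to area->vaddr + 3 * 128.
 * Slot 0 is never handed out here; it is reserved in
 * __create_xol_area() for the uretprobe trampoline.
 */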
1607
1608/*
1609 * xol_get_insn_slot - allocate a slot for xol.
1610 * Returns the allocated slot address or 0.
1611 */
1612static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1613{
1614	struct xol_area *area;
1615	unsigned long xol_vaddr;
1616
1617	area = get_xol_area();
1618	if (!area)
1619		return 0;
1620
1621	xol_vaddr = xol_take_insn_slot(area);
1622	if (unlikely(!xol_vaddr))
1623		return 0;
1624
1625	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1626			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1627
1628	return xol_vaddr;
1629}
1630
1631/*
1632 * xol_free_insn_slot - If slot was earlier allocated by
1633 * @xol_get_insn_slot(), make the slot available for
1634 * subsequent requests.
1635 */
1636static void xol_free_insn_slot(struct task_struct *tsk)
1637{
1638	struct xol_area *area;
1639	unsigned long vma_end;
1640	unsigned long slot_addr;
1641
1642	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1643		return;
1644
1645	slot_addr = tsk->utask->xol_vaddr;
1646	if (unlikely(!slot_addr))
1647		return;
1648
1649	area = tsk->mm->uprobes_state.xol_area;
1650	vma_end = area->vaddr + PAGE_SIZE;
1651	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1652		unsigned long offset;
1653		int slot_nr;
1654
1655		offset = slot_addr - area->vaddr;
1656		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1657		if (slot_nr >= UINSNS_PER_PAGE)
1658			return;
1659
1660		clear_bit(slot_nr, area->bitmap);
1661		atomic_dec(&area->slot_count);
1662		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1663		if (waitqueue_active(&area->wq))
1664			wake_up(&area->wq);
1665
1666		tsk->utask->xol_vaddr = 0;
1667	}
1668}
1669
1670void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1671				  void *src, unsigned long len)
1672{
1673	/* Initialize the slot */
1674	copy_to_page(page, vaddr, src, len);
1675
1676	/*
1677	 * We probably need flush_icache_user_range() but it needs vma.
1678	 * This should work on most of architectures by default. If
1679	 * architecture needs to do something different it can define
1680	 * its own version of the function.
1681	 */
1682	flush_dcache_page(page);
1683}
1684
1685/**
1686 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1687 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1688 * instruction.
1689 * Return the address of the breakpoint instruction.
1690 */
1691unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1692{
1693	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1694}
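
/*
 * Example: on x86 the trap is taken after the 1-byte int3 has
 * executed, so regs->ip points just past the breakpoint and the probed
 * address is ip - UPROBE_SWBP_INSN_SIZE, i.e. ip - 1.
 */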
1695
1696unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1697{
1698	struct uprobe_task *utask = current->utask;
1699
1700	if (unlikely(utask && utask->active_uprobe))
1701		return utask->vaddr;
1702
1703	return instruction_pointer(regs);
1704}
1705
1706static struct return_instance *free_ret_instance(struct return_instance *ri)
1707{
1708	struct return_instance *next = ri->next;
1709	put_uprobe(ri->uprobe);
1710	kfree(ri);
1711	return next;
1712}
1713
1714/*
1715 * Called with no locks held.
1716 * Called in context of an exiting or an exec-ing thread.
1717 */
1718void uprobe_free_utask(struct task_struct *t)
1719{
1720	struct uprobe_task *utask = t->utask;
1721	struct return_instance *ri;
1722
1723	if (!utask)
1724		return;
1725
1726	if (utask->active_uprobe)
1727		put_uprobe(utask->active_uprobe);
1728
1729	ri = utask->return_instances;
1730	while (ri)
1731		ri = free_ret_instance(ri);
1732
1733	xol_free_insn_slot(t);
1734	kfree(utask);
1735	t->utask = NULL;
1736}
1737
1738/*
1739 * Allocate a uprobe_task object for the task if necessary.
1740 * Called when the thread hits a breakpoint.
1741 *
1742 * Returns:
1743 * - pointer to new uprobe_task on success
1744 * - NULL otherwise
1745 */
1746static struct uprobe_task *get_utask(void)
1747{
1748	if (!current->utask)
1749		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1750	return current->utask;
1751}
1752
1753static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1754{
1755	struct uprobe_task *n_utask;
1756	struct return_instance **p, *o, *n;
1757
1758	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1759	if (!n_utask)
1760		return -ENOMEM;
1761	t->utask = n_utask;
1762
1763	p = &n_utask->return_instances;
1764	for (o = o_utask->return_instances; o; o = o->next) {
1765		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1766		if (!n)
1767			return -ENOMEM;
1768
1769		*n = *o;
1770		get_uprobe(n->uprobe);
1771		n->next = NULL;
1772
1773		*p = n;
1774		p = &n->next;
1775		n_utask->depth++;
1776	}
1777
1778	return 0;
1779}
1780
1781static void uprobe_warn(struct task_struct *t, const char *msg)
1782{
1783	pr_warn("uprobe: %s:%d failed to %s\n",
1784			current->comm, current->pid, msg);
1785}
1786
1787static void dup_xol_work(struct callback_head *work)
1788{
1789	if (current->flags & PF_EXITING)
1790		return;
1791
1792	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1793			!fatal_signal_pending(current))
1794		uprobe_warn(current, "dup xol area");
1795}
1796
1797/*
1798 * Called in context of a new clone/fork from copy_process.
1799 */
1800void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1801{
1802	struct uprobe_task *utask = current->utask;
1803	struct mm_struct *mm = current->mm;
1804	struct xol_area *area;
1805
1806	t->utask = NULL;
1807
1808	if (!utask || !utask->return_instances)
1809		return;
1810
1811	if (mm == t->mm && !(flags & CLONE_VFORK))
1812		return;
1813
1814	if (dup_utask(t, utask))
1815		return uprobe_warn(t, "dup ret instances");
1816
1817	/* The task can fork() after dup_xol_work() fails */
1818	area = mm->uprobes_state.xol_area;
1819	if (!area)
1820		return uprobe_warn(t, "dup xol area");
1821
1822	if (mm == t->mm)
1823		return;
1824
1825	t->utask->dup_xol_addr = area->vaddr;
1826	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1827	task_work_add(t, &t->utask->dup_xol_work, true);
1828}
1829
1830/*
1831 * The current area->vaddr notion assumes the trampoline address is
1832 * always equal to area->vaddr.
1833 *
1834 * Returns -1 in case the xol_area is not allocated.
1835 */
1836static unsigned long get_trampoline_vaddr(void)
1837{
1838	struct xol_area *area;
1839	unsigned long trampoline_vaddr = -1;
1840
1841	/* Pairs with xol_add_vma() smp_store_release() */
1842	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
1843	if (area)
1844		trampoline_vaddr = area->vaddr;
1845
1846	return trampoline_vaddr;
1847}
1848
1849static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1850					struct pt_regs *regs)
1851{
1852	struct return_instance *ri = utask->return_instances;
1853	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1854
1855	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1856		ri = free_ret_instance(ri);
1857		utask->depth--;
1858	}
1859	utask->return_instances = ri;
1860}
1861
1862static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1863{
1864	struct return_instance *ri;
1865	struct uprobe_task *utask;
1866	unsigned long orig_ret_vaddr, trampoline_vaddr;
1867	bool chained;
1868
1869	if (!get_xol_area())
1870		return;
1871
1872	utask = get_utask();
1873	if (!utask)
1874		return;
1875
1876	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1877		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1878				" nestedness limit pid/tgid=%d/%d\n",
1879				current->pid, current->tgid);
1880		return;
1881	}
1882
1883	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1884	if (!ri)
1885		return;
1886
1887	trampoline_vaddr = get_trampoline_vaddr();
1888	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1889	if (orig_ret_vaddr == -1)
1890		goto fail;
1891
1892	/* drop the entries invalidated by longjmp() */
1893	chained = (orig_ret_vaddr == trampoline_vaddr);
1894	cleanup_return_instances(utask, chained, regs);
1895
1896	/*
1897	 * We don't want to keep the trampoline address on the stack; rather,
1898	 * keep the original return address of the first caller through all the
1899	 * subsequent instances. This also makes breakpoint unwinding easier.
1900	 */
1901	if (chained) {
1902		if (!utask->return_instances) {
1903			/*
1904			 * This situation is not possible. Likely we have an
1905			 * attack from user-space.
1906			 */
1907			uprobe_warn(current, "handle tail call");
1908			goto fail;
1909		}
1910		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1911	}
1912
1913	ri->uprobe = get_uprobe(uprobe);
1914	ri->func = instruction_pointer(regs);
1915	ri->stack = user_stack_pointer(regs);
1916	ri->orig_ret_vaddr = orig_ret_vaddr;
1917	ri->chained = chained;
1918
1919	utask->depth++;
1920	ri->next = utask->return_instances;
1921	utask->return_instances = ri;
1922
1923	return;
1924 fail:
1925	kfree(ri);
1926}
1927
1928/* Prepare to single-step probed instruction out of line. */
1929static int
1930pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1931{
1932	struct uprobe_task *utask;
1933	unsigned long xol_vaddr;
1934	int err;
1935
1936	utask = get_utask();
1937	if (!utask)
1938		return -ENOMEM;
1939
1940	xol_vaddr = xol_get_insn_slot(uprobe);
1941	if (!xol_vaddr)
1942		return -ENOMEM;
1943
1944	utask->xol_vaddr = xol_vaddr;
1945	utask->vaddr = bp_vaddr;
1946
1947	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1948	if (unlikely(err)) {
1949		xol_free_insn_slot(current);
1950		return err;
1951	}
1952
1953	utask->active_uprobe = uprobe;
1954	utask->state = UTASK_SSTEP;
1955	return 0;
1956}
1957
1958/*
1959 * If we are singlestepping, then ensure this thread is not connected to
1960 * non-fatal signals until completion of singlestep.  When xol insn itself
1961 * triggers the signal, restart the original insn even if the task is
1962 * already SIGKILL'ed (since coredump should report the correct ip).  This
1963 * is even more important if the task has a handler for SIGSEGV/etc: the
1964 * _same_ instruction should be repeated again after return from the signal
1965 * handler, and SSTEP can never finish in this case.
1966 */
1967bool uprobe_deny_signal(void)
1968{
1969	struct task_struct *t = current;
1970	struct uprobe_task *utask = t->utask;
1971
1972	if (likely(!utask || !utask->active_uprobe))
1973		return false;
1974
1975	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1976
1977	if (signal_pending(t)) {
1978		spin_lock_irq(&t->sighand->siglock);
1979		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1980		spin_unlock_irq(&t->sighand->siglock);
1981
1982		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1983			utask->state = UTASK_SSTEP_TRAPPED;
1984			set_tsk_thread_flag(t, TIF_UPROBE);
1985		}
1986	}
1987
1988	return true;
1989}
1990
1991static void mmf_recalc_uprobes(struct mm_struct *mm)
1992{
1993	struct vm_area_struct *vma;
1994
1995	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1996		if (!valid_vma(vma, false))
1997			continue;
1998		/*
1999		 * This is not strictly accurate; we can race with
2000		 * uprobe_unregister() and see the already removed
2001		 * uprobe if delete_uprobe() was not yet called.
2002		 * Or this uprobe can be filtered out.
2003		 */
2004		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
2005			return;
2006	}
2007
2008	clear_bit(MMF_HAS_UPROBES, &mm->flags);
2009}
2010
2011static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
2012{
2013	struct page *page;
2014	uprobe_opcode_t opcode;
2015	int result;
2016
2017	pagefault_disable();
2018	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
2019	pagefault_enable();
2020
2021	if (likely(result == 0))
2022		goto out;
2023
2024	/*
2025	 * The NULL 'tsk' here ensures that any faults that occur here
2026	 * will not be accounted to the task.  'mm' *is* current->mm,
2027	 * but we treat this as a 'remote' access since it is
2028	 * essentially a kernel access to the memory.
2029	 */
2030	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
2031			NULL, NULL);
2032	if (result < 0)
2033		return result;
2034
2035	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
2036	put_page(page);
2037 out:
2038	/* This needs to return true for any variant of the trap insn */
2039	return is_trap_insn(&opcode);
2040}
2041
2042static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
2043{
2044	struct mm_struct *mm = current->mm;
2045	struct uprobe *uprobe = NULL;
2046	struct vm_area_struct *vma;
2047
2048	down_read(&mm->mmap_sem);
2049	vma = find_vma(mm, bp_vaddr);
2050	if (vma && vma->vm_start <= bp_vaddr) {
2051		if (valid_vma(vma, false)) {
2052			struct inode *inode = file_inode(vma->vm_file);
2053			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
2054
2055			uprobe = find_uprobe(inode, offset);
2056		}
2057
2058		if (!uprobe)
2059			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
2060	} else {
2061		*is_swbp = -EFAULT;
2062	}
2063
2064	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
2065		mmf_recalc_uprobes(mm);
2066	up_read(&mm->mmap_sem);
2067
2068	return uprobe;
2069}
2070
2071static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
2072{
2073	struct uprobe_consumer *uc;
2074	int remove = UPROBE_HANDLER_REMOVE;
2075	bool need_prep = false; /* prepare return uprobe, when needed */
2076
2077	down_read(&uprobe->register_rwsem);
2078	for (uc = uprobe->consumers; uc; uc = uc->next) {
2079		int rc = 0;
2080
2081		if (uc->handler) {
2082			rc = uc->handler(uc, regs);
2083			WARN(rc & ~UPROBE_HANDLER_MASK,
2084				"bad rc=0x%x from %ps()\n", rc, uc->handler);
2085		}
2086
2087		if (uc->ret_handler)
2088			need_prep = true;
2089
2090		remove &= rc;
2091	}
2092
2093	if (need_prep && !remove)
2094		prepare_uretprobe(uprobe, regs); /* put bp at return */
2095
2096	if (remove && uprobe->consumers) {
2097		WARN_ON(!uprobe_is_active(uprobe));
2098		unapply_uprobe(uprobe, current->mm);
2099	}
2100	up_read(&uprobe->register_rwsem);
2101}
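
/*
 * Example: a consumer's ->handler returning UPROBE_HANDLER_REMOVE
 * votes to drop the breakpoint for this mm; 'remove' stays non-zero
 * only if every consumer votes that way, and only then is
 * unapply_uprobe() called.
 */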
2102
2103static void
2104handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
2105{
2106	struct uprobe *uprobe = ri->uprobe;
2107	struct uprobe_consumer *uc;
2108
2109	down_read(&uprobe->register_rwsem);
2110	for (uc = uprobe->consumers; uc; uc = uc->next) {
2111		if (uc->ret_handler)
2112			uc->ret_handler(uc, ri->func, regs);
2113	}
2114	up_read(&uprobe->register_rwsem);
2115}
2116
2117static struct return_instance *find_next_ret_chain(struct return_instance *ri)
2118{
2119	bool chained;
2120
2121	do {
2122		chained = ri->chained;
2123		ri = ri->next;	/* can't be NULL if chained */
2124	} while (chained);
2125
2126	return ri;
2127}
2128
2129static void handle_trampoline(struct pt_regs *regs)
2130{
2131	struct uprobe_task *utask;
2132	struct return_instance *ri, *next;
2133	bool valid;
2134
2135	utask = current->utask;
2136	if (!utask)
2137		goto sigill;
2138
2139	ri = utask->return_instances;
2140	if (!ri)
2141		goto sigill;
2142
2143	do {
2144		/*
2145		 * We should throw out the frames invalidated by longjmp().
2146		 * If this chain is valid, then the next one should be alive
2147		 * or NULL; the latter case means that nobody but ri->func
2148		 * could hit this trampoline on return. TODO: sigaltstack().
2149		 */
2150		next = find_next_ret_chain(ri);
2151		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
2152
2153		instruction_pointer_set(regs, ri->orig_ret_vaddr);
2154		do {
2155			if (valid)
2156				handle_uretprobe_chain(ri, regs);
2157			ri = free_ret_instance(ri);
2158			utask->depth--;
2159		} while (ri != next);
2160	} while (!valid);
2161
2162	utask->return_instances = ri;
2163	return;
2164
2165 sigill:
2166	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
2167	force_sig(SIGILL);
2168
2169}
2170
2171bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
2172{
2173	return false;
2174}
2175
2176bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
2177					struct pt_regs *regs)
2178{
2179	return true;
2180}
2181
2182/*
2183 * Run handler and ask thread to singlestep.
2184 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
2185 */
2186static void handle_swbp(struct pt_regs *regs)
2187{
2188	struct uprobe *uprobe;
2189	unsigned long bp_vaddr;
2190	int uninitialized_var(is_swbp);
2191
2192	bp_vaddr = uprobe_get_swbp_addr(regs);
2193	if (bp_vaddr == get_trampoline_vaddr())
2194		return handle_trampoline(regs);
2195
2196	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
2197	if (!uprobe) {
2198		if (is_swbp > 0) {
2199			/* No matching uprobe; signal SIGTRAP. */
2200			send_sig(SIGTRAP, current, 0);
2201		} else {
2202			/*
2203			 * Either we raced with uprobe_unregister() or we can't
2204			 * access this memory. The latter is only possible if
2205			 * another thread plays with our ->mm. In both cases
2206			 * we can simply restart. If this vma was unmapped we
2207			 * can pretend this insn was not executed yet and get
2208			 * the (correct) SIGSEGV after restart.
2209			 */
2210			instruction_pointer_set(regs, bp_vaddr);
2211		}
2212		return;
2213	}
2214
2215	/* change it in advance for ->handler() and restart */
2216	instruction_pointer_set(regs, bp_vaddr);
2217
2218	/*
2219	 * TODO: move copy_insn/etc into _register and remove this hack.
2220	 * After we hit the bp, _unregister + _register can install the
2221	 * new and not-yet-analyzed uprobe at the same address, restart.
2222	 */
2223	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
2224		goto out;
2225
2226	/*
2227	 * Pairs with the smp_wmb() in prepare_uprobe().
2228	 *
2229	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
2230	 * we must also see the stores to &uprobe->arch performed by the
2231	 * prepare_uprobe() call.
2232	 */
2233	smp_rmb();
2234
2235	/* Tracing handlers use ->utask to communicate with fetch methods */
2236	if (!get_utask())
2237		goto out;
2238
2239	if (arch_uprobe_ignore(&uprobe->arch, regs))
2240		goto out;
2241
2242	handler_chain(uprobe, regs);
2243
2244	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
2245		goto out;
2246
2247	if (!pre_ssout(uprobe, regs, bp_vaddr))
2248		return;
2249
2250	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
2251out:
2252	put_uprobe(uprobe);
2253}
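/*
 * Editor's note: the easiest way to exercise this path from userspace is
 * the uprobe tracer (see Documentation/trace/uprobetracer). Assuming
 * tracefs at /sys/kernel/debug/tracing, and with a placeholder binary,
 * offset and event name:
 *
 *	# echo 'p:myprobe /bin/bash:0x4245c0' >> uprobe_events
 *	# echo 1 > events/uprobes/myprobe/enable
 *
 * Each hit on that address then takes the breakpoint fault, the notifier
 * sets TIF_UPROBE, and handle_swbp() runs the registered consumers.
 */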
2254
2255/*
2256 * Perform required fix-ups and disable singlestep.
2257 * Allow pending signals to take effect.
2258 */
2259static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
2260{
2261	struct uprobe *uprobe;
2262	int err = 0;
2263
2264	uprobe = utask->active_uprobe;
2265	if (utask->state == UTASK_SSTEP_ACK)
2266		err = arch_uprobe_post_xol(&uprobe->arch, regs);
2267	else if (utask->state == UTASK_SSTEP_TRAPPED)
2268		arch_uprobe_abort_xol(&uprobe->arch, regs);
2269	else
2270		WARN_ON_ONCE(1);
2271
2272	put_uprobe(uprobe);
2273	utask->active_uprobe = NULL;
2274	utask->state = UTASK_RUNNING;
2275	xol_free_insn_slot(current);
2276
2277	spin_lock_irq(&current->sighand->siglock);
2278	recalc_sigpending(); /* see uprobe_deny_signal() */
2279	spin_unlock_irq(&current->sighand->siglock);
2280
2281	if (unlikely(err)) {
2282		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
2283		force_sig(SIGILL);
2284	}
2285}
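/*
 * Editor's note: the single-step state machine consumed above, as the
 * surrounding code uses it:
 *
 *	UTASK_SSTEP         - set by pre_ssout() before stepping on the
 *			      XOL slot
 *	UTASK_SSTEP_ACK     - set by uprobe_post_sstep_notifier(); the step
 *			      completed, apply fixups via arch_uprobe_post_xol()
 *	UTASK_SSTEP_TRAPPED - the step was disturbed (trap or fatal signal);
 *			      undo it via arch_uprobe_abort_xol()
 */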
2286
2287/*
2288 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
2289 * allows the thread to return from interrupt. After that handle_swbp()
2290 * sets utask->active_uprobe.
2291 *
2292 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
2293 * and allows the thread to return from interrupt.
2294 *
2295 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
2296 * uprobe_notify_resume().
2297 */
2298void uprobe_notify_resume(struct pt_regs *regs)
2299{
2300	struct uprobe_task *utask;
2301
2302	clear_thread_flag(TIF_UPROBE);
2303
2304	utask = current->utask;
2305	if (utask && utask->active_uprobe)
2306		handle_singlestep(utask, regs);
2307	else
2308		handle_swbp(regs);
2309}
2310
2311/*
2312 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
2313 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
2314 */
2315int uprobe_pre_sstep_notifier(struct pt_regs *regs)
2316{
2317	if (!current->mm)
2318		return 0;
2319
2320	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2321	    (!current->utask || !current->utask->return_instances))
2322		return 0;
2323
2324	set_thread_flag(TIF_UPROBE);
2325	return 1;
2326}
2327
2328/*
2329 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
2330 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
2331 */
2332int uprobe_post_sstep_notifier(struct pt_regs *regs)
2333{
2334	struct uprobe_task *utask = current->utask;
2335
2336	if (!current->mm || !utask || !utask->active_uprobe)
2337		/* task is currently not uprobed */
2338		return 0;
2339
2340	utask->state = UTASK_SSTEP_ACK;
2341	set_thread_flag(TIF_UPROBE);
2342	return 1;
2343}
2344
2345static struct notifier_block uprobe_exception_nb = {
2346	.notifier_call		= arch_uprobe_exception_notify,
2347	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2348};
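/*
 * Editor's sketch of the notifier callback named above; the real
 * arch_uprobe_exception_notify() lives in arch code. An x86-flavoured
 * version forwards breakpoint (int3) hits to uprobe_pre_sstep_notifier()
 * and debug exceptions to uprobe_post_sstep_notifier(), ignoring
 * kernel-mode traps.
 */
#if 0	/* arch-specific illustration, x86-flavoured */
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	}

	return NOTIFY_DONE;
}
#endif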
2349
2350void __init uprobes_init(void)
2351{
2352	int i;
2353
2354	for (i = 0; i < UPROBES_HASH_SZ; i++)
2355		mutex_init(&uprobes_mmap_mutex[i]);
2356
2357	BUG_ON(register_die_notifier(&uprobe_exception_nb));
2358}